1 From faec6b6c2cc0219e74569c13f581fc11d8f3fc57 Mon Sep 17 00:00:00 2001
2 From: Florian Fainelli <florian@openwrt.org>
3 Date: Sun, 17 Mar 2013 20:12:10 +0100
4 Subject: [PATCH] UM: fix make headers_install after UAPI header installation
6 Commit 10b63956 (UAPI: Plumb the UAPI Kbuilds into the user
7 header installation and checking) breaks UML make headers_install with
10 $ ARCH=um make headers_install
11 CHK include/generated/uapi/linux/version.h
12 UPD include/generated/uapi/linux/version.h
13 HOSTCC scripts/basic/fixdep
14 WRAP arch/um/include/generated/asm/bug.h
16 WRAP arch/um/include/generated/asm/trace_clock.h
17 SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_32.h
18 SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_64.h
19 SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_x32.h
20 SYSTBL arch/x86/syscalls/../include/generated/asm/syscalls_32.h
21 HOSTCC scripts/unifdef
22 Makefile:912: *** Headers not exportable for the um architecture. Stop.
23 zsh: exit 2 ARCH=um make headers_install
25 The reason for that is that the top-level Makefile performs the
26 following check:
27 $(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
28 $(error Headers not exportable for the $(SRCARCH) architecture))
30 we end up in the else part of the $(if) statement because UML still uses
31 the old path in arch/um/include/asm/Kbuild. This patch fixes the issue
32 by moving the header files to be in arch/um/include/uapi/asm/ thus
33 making headers_install (and other make targets checking for uapi) work
34 again.
36 Signed-off-by: Florian Fainelli <florian@openwrt.org>
38 Richard, this has been broken for 3.7+ onwards, if you want me to send
39 you separate patches for 3.7 and 3.8 let me know. Thanks!
41 arch/um/include/{ => uapi}/asm/Kbuild | 0
42 arch/um/include/{ => uapi}/asm/a.out-core.h | 0
43 arch/um/include/{ => uapi}/asm/bugs.h | 0
44 arch/um/include/{ => uapi}/asm/cache.h | 0
45 arch/um/include/{ => uapi}/asm/common.lds.S | 0
46 arch/um/include/{ => uapi}/asm/dma.h | 0
47 arch/um/include/{ => uapi}/asm/fixmap.h | 0
48 arch/um/include/{ => uapi}/asm/irq.h | 0
49 arch/um/include/{ => uapi}/asm/irqflags.h | 0
50 arch/um/include/{ => uapi}/asm/kmap_types.h | 0
51 arch/um/include/{ => uapi}/asm/kvm_para.h | 0
52 arch/um/include/{ => uapi}/asm/mmu.h | 0
53 arch/um/include/{ => uapi}/asm/mmu_context.h | 0
54 arch/um/include/{ => uapi}/asm/page.h | 0
55 arch/um/include/{ => uapi}/asm/pgalloc.h | 0
56 arch/um/include/{ => uapi}/asm/pgtable-2level.h | 0
57 arch/um/include/{ => uapi}/asm/pgtable-3level.h | 0
58 arch/um/include/{ => uapi}/asm/pgtable.h | 0
59 arch/um/include/{ => uapi}/asm/processor-generic.h | 0
60 arch/um/include/{ => uapi}/asm/ptrace-generic.h | 0
61 arch/um/include/{ => uapi}/asm/setup.h | 0
62 arch/um/include/{ => uapi}/asm/smp.h | 0
63 arch/um/include/{ => uapi}/asm/sysrq.h | 0
64 arch/um/include/{ => uapi}/asm/thread_info.h | 0
65 arch/um/include/{ => uapi}/asm/timex.h | 0
66 arch/um/include/{ => uapi}/asm/tlb.h | 0
67 arch/um/include/{ => uapi}/asm/tlbflush.h | 0
68 arch/um/include/{ => uapi}/asm/uaccess.h | 0
69 28 files changed, 0 insertions(+), 0 deletions(-)
70 rename arch/um/include/{ => uapi}/asm/Kbuild (100%)
71 rename arch/um/include/{ => uapi}/asm/a.out-core.h (100%)
72 rename arch/um/include/{ => uapi}/asm/bugs.h (100%)
73 rename arch/um/include/{ => uapi}/asm/cache.h (100%)
74 rename arch/um/include/{ => uapi}/asm/common.lds.S (100%)
75 rename arch/um/include/{ => uapi}/asm/dma.h (100%)
76 rename arch/um/include/{ => uapi}/asm/fixmap.h (100%)
77 rename arch/um/include/{ => uapi}/asm/irq.h (100%)
78 rename arch/um/include/{ => uapi}/asm/irqflags.h (100%)
79 rename arch/um/include/{ => uapi}/asm/kmap_types.h (100%)
80 rename arch/um/include/{ => uapi}/asm/kvm_para.h (100%)
81 rename arch/um/include/{ => uapi}/asm/mmu.h (100%)
82 rename arch/um/include/{ => uapi}/asm/mmu_context.h (100%)
83 rename arch/um/include/{ => uapi}/asm/page.h (100%)
84 rename arch/um/include/{ => uapi}/asm/pgalloc.h (100%)
85 rename arch/um/include/{ => uapi}/asm/pgtable-2level.h (100%)
86 rename arch/um/include/{ => uapi}/asm/pgtable-3level.h (100%)
87 rename arch/um/include/{ => uapi}/asm/pgtable.h (100%)
88 rename arch/um/include/{ => uapi}/asm/processor-generic.h (100%)
89 rename arch/um/include/{ => uapi}/asm/ptrace-generic.h (100%)
90 rename arch/um/include/{ => uapi}/asm/setup.h (100%)
91 rename arch/um/include/{ => uapi}/asm/smp.h (100%)
92 rename arch/um/include/{ => uapi}/asm/sysrq.h (100%)
93 rename arch/um/include/{ => uapi}/asm/thread_info.h (100%)
94 rename arch/um/include/{ => uapi}/asm/timex.h (100%)
95 rename arch/um/include/{ => uapi}/asm/tlb.h (100%)
96 rename arch/um/include/{ => uapi}/asm/tlbflush.h (100%)
97 rename arch/um/include/{ => uapi}/asm/uaccess.h (100%)
99 --- a/arch/um/include/asm/Kbuild
102 -generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
103 -generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
104 -generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
105 -generic-y += switch_to.h clkdev.h
106 -generic-y += trace_clock.h
107 -generic-y += preempt.h
109 -generic-y += barrier.h
110 --- a/arch/um/include/asm/a.out-core.h
113 -/* a.out coredump register dumper
115 - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
116 - * Written by David Howells (dhowells@redhat.com)
118 - * This program is free software; you can redistribute it and/or
119 - * modify it under the terms of the GNU General Public Licence
120 - * as published by the Free Software Foundation; either version
121 - * 2 of the Licence, or (at your option) any later version.
124 -#ifndef __UM_A_OUT_CORE_H
125 -#define __UM_A_OUT_CORE_H
129 -#include <linux/user.h>
132 - * fill in the user structure for an a.out core dump
134 -static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
138 -#endif /* __KERNEL__ */
139 -#endif /* __UM_A_OUT_CORE_H */
140 --- a/arch/um/include/asm/bugs.h
146 -void check_bugs(void);
149 --- a/arch/um/include/asm/cache.h
152 -#ifndef __UM_CACHE_H
153 -#define __UM_CACHE_H
156 -#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
157 -# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
158 -#elif defined(CONFIG_UML_X86) /* 64-bit */
159 -# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
161 -/* XXX: this was taken from x86, now it's completely random. Luckily only
162 - * affects SMP padding. */
163 -# define L1_CACHE_SHIFT 5
166 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
169 --- a/arch/um/include/asm/common.lds.S
172 -#include <asm-generic/vmlinux.lds.h>
174 - .fini : { *(.fini) } =0x9090
176 - PROVIDE (etext = .);
180 - PROVIDE (sdata = .);
184 - .unprotected : { *(.unprotected) }
186 - PROVIDE (_unprotected_end = .);
189 - .note : { *(.note.*) }
194 - .uml.setup.init : {
195 - __uml_setup_start = .;
197 - __uml_setup_end = .;
201 - __uml_help_start = .;
203 - __uml_help_end = .;
206 - .uml.postsetup.init : {
207 - __uml_postsetup_start = .;
208 - *(.uml.postsetup.init)
209 - __uml_postsetup_end = .;
222 - .con_initcall.init : {
226 - .uml.initcall.init : {
227 - __uml_initcall_start = .;
228 - *(.uml.initcall.init)
229 - __uml_initcall_end = .;
235 - __exitcall_begin = .;
237 - __exitcall_end = .;
241 - __uml_exitcall_begin = .;
242 - *(.uml.exitcall.exit)
243 - __uml_exitcall_end = .;
247 - .altinstructions : {
248 - __alt_instructions = .;
249 - *(.altinstructions)
250 - __alt_instructions_end = .;
252 - .altinstr_replacement : { *(.altinstr_replacement) }
253 - /* .exit.text is discard at runtime, not link time, to deal with references
254 - from .altinstructions and .eh_frame */
255 - .exit.text : { *(.exit.text) }
256 - .exit.data : { *(.exit.data) }
259 - __preinit_array_start = .;
261 - __preinit_array_end = .;
264 - __init_array_start = .;
266 - __init_array_end = .;
269 - __fini_array_start = .;
271 - __fini_array_end = .;
279 --- a/arch/um/include/asm/dma.h
287 -extern unsigned long uml_physmem;
289 -#define MAX_DMA_ADDRESS (uml_physmem)
292 --- a/arch/um/include/asm/fixmap.h
295 -#ifndef __UM_FIXMAP_H
296 -#define __UM_FIXMAP_H
298 -#include <asm/processor.h>
299 -#include <asm/kmap_types.h>
300 -#include <asm/archparam.h>
301 -#include <asm/page.h>
302 -#include <linux/threads.h>
305 - * Here we define all the compile-time 'special' virtual
306 - * addresses. The point is to have a constant address at
307 - * compile time, but to set the physical address only
308 - * in the boot process. We allocate these special addresses
309 - * from the end of virtual memory (0xfffff000) backwards.
310 - * Also this lets us do fail-safe vmalloc(), we
311 - * can guarantee that these special addresses and
312 - * vmalloc()-ed addresses never overlap.
314 - * these 'compile-time allocated' memory buffers are
315 - * fixed-size 4k pages. (or larger if used with an increment
316 - * highger than 1) use fixmap_set(idx,phys) to associate
317 - * physical memory with fixmap indices.
319 - * TLB entries of such buffers will not be flushed across
324 - * on UP currently we will have no trace of the fixmap mechanizm,
325 - * no page table allocations, etc. This might change in the
326 - * future, say framebuffers for the console driver(s) could be
329 -enum fixed_addresses {
330 -#ifdef CONFIG_HIGHMEM
331 - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
332 - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
334 - __end_of_fixed_addresses
337 -extern void __set_fixmap (enum fixed_addresses idx,
338 - unsigned long phys, pgprot_t flags);
341 - * used by vmalloc.c.
343 - * Leave one empty page between vmalloc'ed areas and
344 - * the start of the fixmap, and leave one page empty
345 - * at the top of mem..
348 -#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
349 -#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
350 -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
352 -#include <asm-generic/fixmap.h>
355 --- a/arch/um/include/asm/irq.h
363 -#define CONSOLE_IRQ 2
364 -#define CONSOLE_WRITE_IRQ 3
366 -#define UM_ETH_IRQ 5
368 -#define SSL_WRITE_IRQ 7
369 -#define ACCEPT_IRQ 8
370 -#define MCONSOLE_IRQ 9
371 -#define WINCH_IRQ 10
372 -#define SIGIO_WRITE_IRQ 11
373 -#define TELNETD_IRQ 12
374 -#define XTERM_IRQ 13
375 -#define RANDOM_IRQ 14
377 -#define LAST_IRQ RANDOM_IRQ
378 -#define NR_IRQS (LAST_IRQ + 1)
381 --- a/arch/um/include/asm/irqflags.h
384 -#ifndef __UM_IRQFLAGS_H
385 -#define __UM_IRQFLAGS_H
387 -extern int get_signals(void);
388 -extern int set_signals(int enable);
389 -extern void block_signals(void);
390 -extern void unblock_signals(void);
392 -static inline unsigned long arch_local_save_flags(void)
394 - return get_signals();
397 -static inline void arch_local_irq_restore(unsigned long flags)
399 - set_signals(flags);
402 -static inline void arch_local_irq_enable(void)
407 -static inline void arch_local_irq_disable(void)
412 -static inline unsigned long arch_local_irq_save(void)
414 - unsigned long flags;
415 - flags = arch_local_save_flags();
416 - arch_local_irq_disable();
420 -static inline bool arch_irqs_disabled(void)
422 - return arch_local_save_flags() == 0;
426 --- a/arch/um/include/asm/kmap_types.h
430 - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
431 - * Licensed under the GPL
434 -#ifndef __UM_KMAP_TYPES_H
435 -#define __UM_KMAP_TYPES_H
437 -/* No more #include "asm/arch/kmap_types.h" ! */
439 -#define KM_TYPE_NR 14
442 --- a/arch/um/include/asm/kvm_para.h
445 -#include <asm-generic/kvm_para.h>
446 --- a/arch/um/include/asm/mmu.h
450 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
451 - * Licensed under the GPL
454 -#ifndef __ARCH_UM_MMU_H
455 -#define __ARCH_UM_MMU_H
458 -#include <asm/mm_context.h>
460 -typedef struct mm_context {
462 - struct uml_arch_mm_context arch;
463 - struct page *stub_pages[2];
466 -extern void __switch_mm(struct mm_id * mm_idp);
468 -/* Avoid tangled inclusion with asm/ldt.h */
469 -extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
470 -extern void free_ldt(struct mm_context *mm);
473 --- a/arch/um/include/asm/mmu_context.h
477 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
478 - * Licensed under the GPL
481 -#ifndef __UM_MMU_CONTEXT_H
482 -#define __UM_MMU_CONTEXT_H
484 -#include <linux/sched.h>
485 -#include <asm/mmu.h>
487 -extern void uml_setup_stubs(struct mm_struct *mm);
488 -extern void arch_exit_mmap(struct mm_struct *mm);
490 -#define deactivate_mm(tsk,mm) do { } while (0)
492 -extern void force_flush_all(void);
494 -static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
497 - * This is called by fs/exec.c and sys_unshare()
498 - * when the new ->mm is used for the first time.
500 - __switch_mm(&new->context.id);
501 - down_write(&new->mmap_sem);
502 - uml_setup_stubs(new);
503 - up_write(&new->mmap_sem);
506 -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
507 - struct task_struct *tsk)
509 - unsigned cpu = smp_processor_id();
512 - cpumask_clear_cpu(cpu, mm_cpumask(prev));
513 - cpumask_set_cpu(cpu, mm_cpumask(next));
514 - if(next != &init_mm)
515 - __switch_mm(&next->context.id);
519 -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
521 - uml_setup_stubs(mm);
524 -static inline void enter_lazy_tlb(struct mm_struct *mm,
525 - struct task_struct *tsk)
529 -extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
531 -extern void destroy_context(struct mm_struct *mm);
534 --- a/arch/um/include/asm/page.h
538 - * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
539 - * Copyright 2003 PathScale, Inc.
540 - * Licensed under the GPL
546 -#include <linux/const.h>
548 -/* PAGE_SHIFT determines the page size */
549 -#define PAGE_SHIFT 12
550 -#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
551 -#define PAGE_MASK (~(PAGE_SIZE-1))
553 -#ifndef __ASSEMBLY__
557 -#include <linux/types.h>
558 -#include <asm/vm-flags.h>
561 - * These are used to make use of C type-checking..
564 -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
565 -#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
567 -#define clear_user_page(page, vaddr, pg) clear_page(page)
568 -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
570 -#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
572 -typedef struct { unsigned long pte_low, pte_high; } pte_t;
573 -typedef struct { unsigned long pmd; } pmd_t;
574 -typedef struct { unsigned long pgd; } pgd_t;
575 -#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
577 -#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
578 -#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
579 -#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
580 -#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
582 - (to).pte_low = (from).pte_low; })
583 -#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
584 -#define pte_set_val(pte, phys, prot) \
585 - ({ (pte).pte_high = (phys) >> 32; \
586 - (pte).pte_low = (phys) | pgprot_val(prot); })
588 -#define pmd_val(x) ((x).pmd)
589 -#define __pmd(x) ((pmd_t) { (x) } )
591 -typedef unsigned long long pfn_t;
592 -typedef unsigned long long phys_t;
596 -typedef struct { unsigned long pte; } pte_t;
597 -typedef struct { unsigned long pgd; } pgd_t;
599 -#ifdef CONFIG_3_LEVEL_PGTABLES
600 -typedef struct { unsigned long pmd; } pmd_t;
601 -#define pmd_val(x) ((x).pmd)
602 -#define __pmd(x) ((pmd_t) { (x) } )
605 -#define pte_val(x) ((x).pte)
608 -#define pte_get_bits(p, bits) ((p).pte & (bits))
609 -#define pte_set_bits(p, bits) ((p).pte |= (bits))
610 -#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
611 -#define pte_copy(to, from) ((to).pte = (from).pte)
612 -#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
613 -#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
615 -typedef unsigned long pfn_t;
616 -typedef unsigned long phys_t;
620 -typedef struct { unsigned long pgprot; } pgprot_t;
622 -typedef struct page *pgtable_t;
624 -#define pgd_val(x) ((x).pgd)
625 -#define pgprot_val(x) ((x).pgprot)
627 -#define __pte(x) ((pte_t) { (x) } )
628 -#define __pgd(x) ((pgd_t) { (x) } )
629 -#define __pgprot(x) ((pgprot_t) { (x) } )
631 -extern unsigned long uml_physmem;
633 -#define PAGE_OFFSET (uml_physmem)
634 -#define KERNELBASE PAGE_OFFSET
636 -#define __va_space (8*1024*1024)
640 -/* Cast to unsigned long before casting to void * to avoid a warning from
641 - * mmap_kmem about cutting a long long down to a void *. Not sure that
642 - * casting is the right thing, but 32-bit UML can't have 64-bit virtual
645 -#define __pa(virt) to_phys((void *) (unsigned long) (virt))
646 -#define __va(phys) to_virt((unsigned long) (phys))
648 -#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
649 -#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
651 -#define pfn_valid(pfn) ((pfn) < max_mapnr)
652 -#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
654 -#include <asm-generic/memory_model.h>
655 -#include <asm-generic/getorder.h>
657 -#endif /* __ASSEMBLY__ */
658 -#endif /* __UM_PAGE_H */
659 --- a/arch/um/include/asm/pgalloc.h
663 - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
664 - * Copyright 2003 PathScale, Inc.
665 - * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
666 - * Licensed under the GPL
669 -#ifndef __UM_PGALLOC_H
670 -#define __UM_PGALLOC_H
672 -#include <linux/mm.h>
674 -#define pmd_populate_kernel(mm, pmd, pte) \
675 - set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
677 -#define pmd_populate(mm, pmd, pte) \
678 - set_pmd(pmd, __pmd(_PAGE_TABLE + \
679 - ((unsigned long long)page_to_pfn(pte) << \
680 - (unsigned long long) PAGE_SHIFT)))
681 -#define pmd_pgtable(pmd) pmd_page(pmd)
684 - * Allocate and free page tables.
686 -extern pgd_t *pgd_alloc(struct mm_struct *);
687 -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
689 -extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
690 -extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
692 -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
694 - free_page((unsigned long) pte);
697 -static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
699 - pgtable_page_dtor(pte);
703 -#define __pte_free_tlb(tlb,pte, address) \
705 - pgtable_page_dtor(pte); \
706 - tlb_remove_page((tlb),(pte)); \
709 -#ifdef CONFIG_3_LEVEL_PGTABLES
711 -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
713 - free_page((unsigned long)pmd);
716 -#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
719 -#define check_pgt_cache() do { } while (0)
723 --- a/arch/um/include/asm/pgtable-2level.h
727 - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
728 - * Copyright 2003 PathScale, Inc.
729 - * Derived from include/asm-i386/pgtable.h
730 - * Licensed under the GPL
733 -#ifndef __UM_PGTABLE_2LEVEL_H
734 -#define __UM_PGTABLE_2LEVEL_H
736 -#include <asm-generic/pgtable-nopmd.h>
738 -/* PGDIR_SHIFT determines what a third-level page table entry can map */
740 -#define PGDIR_SHIFT 22
741 -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
742 -#define PGDIR_MASK (~(PGDIR_SIZE-1))
745 - * entries per page directory level: the i386 is two-level, so
746 - * we don't really have any PMD directory physically.
748 -#define PTRS_PER_PTE 1024
749 -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
750 -#define PTRS_PER_PGD 1024
751 -#define FIRST_USER_ADDRESS 0
753 -#define pte_ERROR(e) \
754 - printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
756 -#define pgd_ERROR(e) \
757 - printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
760 -static inline int pgd_newpage(pgd_t pgd) { return 0; }
761 -static inline void pgd_mkuptodate(pgd_t pgd) { }
763 -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
765 -#define pte_pfn(x) phys_to_pfn(pte_val(x))
766 -#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
767 -#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
770 - * Bits 0 through 4 are taken
772 -#define PTE_FILE_MAX_BITS 27
774 -#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
776 -#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
779 --- a/arch/um/include/asm/pgtable-3level.h
783 - * Copyright 2003 PathScale Inc
784 - * Derived from include/asm-i386/pgtable.h
785 - * Licensed under the GPL
788 -#ifndef __UM_PGTABLE_3LEVEL_H
789 -#define __UM_PGTABLE_3LEVEL_H
791 -#include <asm-generic/pgtable-nopud.h>
793 -/* PGDIR_SHIFT determines what a third-level page table entry can map */
796 -#define PGDIR_SHIFT 30
798 -#define PGDIR_SHIFT 31
800 -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
801 -#define PGDIR_MASK (~(PGDIR_SIZE-1))
803 -/* PMD_SHIFT determines the size of the area a second-level page table can
807 -#define PMD_SHIFT 21
808 -#define PMD_SIZE (1UL << PMD_SHIFT)
809 -#define PMD_MASK (~(PMD_SIZE-1))
812 - * entries per page directory level
815 -#define PTRS_PER_PTE 512
817 -#define PTRS_PER_PMD 512
818 -#define PTRS_PER_PGD 512
820 -#define PTRS_PER_PMD 1024
821 -#define PTRS_PER_PGD 1024
824 -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
825 -#define FIRST_USER_ADDRESS 0
827 -#define pte_ERROR(e) \
828 - printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
830 -#define pmd_ERROR(e) \
831 - printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
833 -#define pgd_ERROR(e) \
834 - printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
837 -#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
838 -#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
839 -#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
840 -#define pud_populate(mm, pud, pmd) \
841 - set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
844 -#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
846 -#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
849 -static inline int pgd_newpage(pgd_t pgd)
851 - return(pgd_val(pgd) & _PAGE_NEWPAGE);
854 -static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
857 -#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
859 -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
863 -extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
865 -static inline void pud_clear (pud_t *pud)
867 - set_pud(pud, __pud(_PAGE_NEWPAGE));
870 -#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
871 -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
873 -/* Find an entry in the second-level page table.. */
874 -#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
875 - pmd_index(address))
877 -static inline unsigned long pte_pfn(pte_t pte)
879 - return phys_to_pfn(pte_val(pte));
882 -static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
885 - phys_t phys = pfn_to_phys(page_nr);
887 - pte_set_val(pte, phys, pgprot);
891 -static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
893 - return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
897 - * Bits 0 through 3 are taken in the low part of the pte,
898 - * put the 32 bits of offset into the high part.
900 -#define PTE_FILE_MAX_BITS 32
904 -#define pte_to_pgoff(p) ((p).pte >> 32)
906 -#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
910 -#define pte_to_pgoff(pte) ((pte).pte_high)
912 -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
918 --- a/arch/um/include/asm/pgtable.h
922 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
923 - * Copyright 2003 PathScale, Inc.
924 - * Derived from include/asm-i386/pgtable.h
925 - * Licensed under the GPL
928 -#ifndef __UM_PGTABLE_H
929 -#define __UM_PGTABLE_H
931 -#include <asm/fixmap.h>
933 -#define _PAGE_PRESENT 0x001
934 -#define _PAGE_NEWPAGE 0x002
935 -#define _PAGE_NEWPROT 0x004
936 -#define _PAGE_RW 0x020
937 -#define _PAGE_USER 0x040
938 -#define _PAGE_ACCESSED 0x080
939 -#define _PAGE_DIRTY 0x100
940 -/* If _PAGE_PRESENT is clear, we use these: */
941 -#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
942 -#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
943 - pte_present gives true */
945 -#ifdef CONFIG_3_LEVEL_PGTABLES
946 -#include <asm/pgtable-3level.h>
948 -#include <asm/pgtable-2level.h>
951 -extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
953 -/* zero page used for uninitialized stuff */
954 -extern unsigned long *empty_zero_page;
956 -#define pgtable_cache_init() do ; while (0)
958 -/* Just any arbitrary offset to the start of the vmalloc VM area: the
959 - * current 8MB value just means that there will be a 8MB "hole" after the
960 - * physical memory until the kernel virtual memory starts. That means that
961 - * any out-of-bounds memory accesses will hopefully be caught.
962 - * The vmalloc() routines leaves a hole of 4kB between each vmalloced
963 - * area for the same reason. ;)
966 -extern unsigned long end_iomem;
968 -#define VMALLOC_OFFSET (__va_space)
969 -#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
970 -#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
971 -#ifdef CONFIG_HIGHMEM
972 -# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
974 -# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
976 -#define MODULES_VADDR VMALLOC_START
977 -#define MODULES_END VMALLOC_END
978 -#define MODULES_LEN (MODULES_VADDR - MODULES_END)
980 -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
981 -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
982 -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
983 -#define __PAGE_KERNEL_EXEC \
984 - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
985 -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
986 -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
987 -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
988 -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
989 -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
990 -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
993 - * The i386 can't do page protection for execute, and considers that the same
995 - * Also, write permissions imply read permissions. This is the closest we can
998 -#define __P000 PAGE_NONE
999 -#define __P001 PAGE_READONLY
1000 -#define __P010 PAGE_COPY
1001 -#define __P011 PAGE_COPY
1002 -#define __P100 PAGE_READONLY
1003 -#define __P101 PAGE_READONLY
1004 -#define __P110 PAGE_COPY
1005 -#define __P111 PAGE_COPY
1007 -#define __S000 PAGE_NONE
1008 -#define __S001 PAGE_READONLY
1009 -#define __S010 PAGE_SHARED
1010 -#define __S011 PAGE_SHARED
1011 -#define __S100 PAGE_READONLY
1012 -#define __S101 PAGE_READONLY
1013 -#define __S110 PAGE_SHARED
1014 -#define __S111 PAGE_SHARED
1017 - * ZERO_PAGE is a global shared page that is always zero: used
1018 - * for zero-mapped memory areas etc..
1020 -#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
1022 -#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
1024 -#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
1025 -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
1027 -#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
1028 -#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
1030 -#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
1031 -#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
1033 -#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
1034 -#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
1036 -#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
1038 -#define pte_page(x) pfn_to_page(pte_pfn(x))
1040 -#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
1043 - * =================================
1044 - * Flags checking section.
1045 - * =================================
1048 -static inline int pte_none(pte_t pte)
1050 - return pte_is_zero(pte);
1054 - * The following only work if pte_present() is true.
1055 - * Undefined behaviour if not..
1057 -static inline int pte_read(pte_t pte)
1059 - return((pte_get_bits(pte, _PAGE_USER)) &&
1060 - !(pte_get_bits(pte, _PAGE_PROTNONE)));
1063 -static inline int pte_exec(pte_t pte){
1064 - return((pte_get_bits(pte, _PAGE_USER)) &&
1065 - !(pte_get_bits(pte, _PAGE_PROTNONE)));
1068 -static inline int pte_write(pte_t pte)
1070 - return((pte_get_bits(pte, _PAGE_RW)) &&
1071 - !(pte_get_bits(pte, _PAGE_PROTNONE)));
1075 - * The following only works if pte_present() is not true.
1077 -static inline int pte_file(pte_t pte)
1079 - return pte_get_bits(pte, _PAGE_FILE);
1082 -static inline int pte_dirty(pte_t pte)
1084 - return pte_get_bits(pte, _PAGE_DIRTY);
1087 -static inline int pte_young(pte_t pte)
1089 - return pte_get_bits(pte, _PAGE_ACCESSED);
1092 -static inline int pte_newpage(pte_t pte)
1094 - return pte_get_bits(pte, _PAGE_NEWPAGE);
1097 -static inline int pte_newprot(pte_t pte)
1099 - return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
1102 -static inline int pte_special(pte_t pte)
1108 - * =================================
1109 - * Flags setting section.
1110 - * =================================
1113 -static inline pte_t pte_mknewprot(pte_t pte)
1115 - pte_set_bits(pte, _PAGE_NEWPROT);
1119 -static inline pte_t pte_mkclean(pte_t pte)
1121 - pte_clear_bits(pte, _PAGE_DIRTY);
1125 -static inline pte_t pte_mkold(pte_t pte)
1127 - pte_clear_bits(pte, _PAGE_ACCESSED);
1131 -static inline pte_t pte_wrprotect(pte_t pte)
1133 - pte_clear_bits(pte, _PAGE_RW);
1134 - return(pte_mknewprot(pte));
1137 -static inline pte_t pte_mkread(pte_t pte)
1139 - pte_set_bits(pte, _PAGE_USER);
1140 - return(pte_mknewprot(pte));
1143 -static inline pte_t pte_mkdirty(pte_t pte)
1145 - pte_set_bits(pte, _PAGE_DIRTY);
1149 -static inline pte_t pte_mkyoung(pte_t pte)
1151 - pte_set_bits(pte, _PAGE_ACCESSED);
1155 -static inline pte_t pte_mkwrite(pte_t pte)
1157 - pte_set_bits(pte, _PAGE_RW);
1158 - return(pte_mknewprot(pte));
1161 -static inline pte_t pte_mkuptodate(pte_t pte)
1163 - pte_clear_bits(pte, _PAGE_NEWPAGE);
1164 - if(pte_present(pte))
1165 - pte_clear_bits(pte, _PAGE_NEWPROT);
1169 -static inline pte_t pte_mknewpage(pte_t pte)
1171 - pte_set_bits(pte, _PAGE_NEWPAGE);
1175 -static inline pte_t pte_mkspecial(pte_t pte)
1180 -static inline void set_pte(pte_t *pteptr, pte_t pteval)
1182 - pte_copy(*pteptr, pteval);
1184 - /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
1185 - * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
1189 - *pteptr = pte_mknewpage(*pteptr);
1190 - if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
1192 -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
1194 -#define __HAVE_ARCH_PTE_SAME
1195 -static inline int pte_same(pte_t pte_a, pte_t pte_b)
1197 - return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
1201 - * Conversion functions: convert a page and protection to a page entry,
1202 - * and a page entry and page directory to the page they refer to.
1205 -#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
1206 -#define __virt_to_page(virt) phys_to_page(__pa(virt))
1207 -#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
1208 -#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
1210 -#define mk_pte(page, pgprot) \
1213 - pte_set_val(pte, page_to_phys(page), (pgprot)); \
1214 - if (pte_present(pte)) \
1215 - pte_mknewprot(pte_mknewpage(pte)); \
1218 -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1220 - pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
1225 - * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
1227 - * this macro returns the index of the entry in the pgd page which would
1228 - * control the given virtual address
1230 -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1233 - * pgd_offset() returns a (pgd_t *)
1234 - * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
1236 -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
1239 - * a shortcut which implies the use of the kernel's pgd, instead
1242 -#define pgd_offset_k(address) pgd_offset(&init_mm, address)
1245 - * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
1247 - * this macro returns the index of the entry in the pmd page which would
1248 - * control the given virtual address
1250 -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
1251 -#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1253 -#define pmd_page_vaddr(pmd) \
1254 - ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
1257 - * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
1259 - * this macro returns the index of the entry in the pte page which would
1260 - * control the given virtual address
1262 -#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
1263 -#define pte_offset_kernel(dir, address) \
1264 - ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
1265 -#define pte_offset_map(dir, address) \
1266 - ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
1267 -#define pte_unmap(pte) do { } while (0)
1270 -extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
1272 -#define update_mmu_cache(vma,address,ptep) do ; while (0)
1274 -/* Encode and de-code a swap entry */
1275 -#define __swp_type(x) (((x).val >> 5) & 0x1f)
1276 -#define __swp_offset(x) ((x).val >> 11)
1278 -#define __swp_entry(type, offset) \
1279 - ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
1280 -#define __pte_to_swp_entry(pte) \
1281 - ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
1282 -#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
1284 -#define kern_addr_valid(addr) (1)
1286 -#include <asm-generic/pgtable.h>
1288 -/* Clear a kernel PTE and flush it from the TLB */
1289 -#define kpte_clear_flush(ptep, vaddr) \
1291 - pte_clear(&init_mm, (vaddr), (ptep)); \
1292 - __flush_tlb_one((vaddr)); \
1296 --- a/arch/um/include/asm/processor-generic.h
1300 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1301 - * Licensed under the GPL
1304 -#ifndef __UM_PROCESSOR_GENERIC_H
1305 -#define __UM_PROCESSOR_GENERIC_H
1309 -struct task_struct;
1311 -#include <asm/ptrace.h>
1312 -#include <registers.h>
1313 -#include <sysdep/archsetjmp.h>
1315 -#include <linux/prefetch.h>
1319 -struct thread_struct {
1320 - struct pt_regs regs;
1321 - struct pt_regs *segv_regs;
1322 - int singlestep_syscall;
1324 - jmp_buf *fault_catcher;
1325 - struct task_struct *prev_sched;
1326 - struct arch_thread arch;
1327 - jmp_buf switch_buf;
1335 - int (*proc)(void *);
1339 - void (*proc)(void *);
1346 -#define INIT_THREAD \
1348 - .regs = EMPTY_REGS, \
1349 - .fault_addr = NULL, \
1350 - .prev_sched = NULL, \
1351 - .arch = INIT_ARCH_THREAD, \
1352 - .request = { 0 } \
1355 -static inline void release_thread(struct task_struct *task)
1359 -extern unsigned long thread_saved_pc(struct task_struct *t);
1361 -static inline void mm_copy_segments(struct mm_struct *from_mm,
1362 - struct mm_struct *new_mm)
1366 -#define init_stack (init_thread_union.stack)
1369 - * User space process size: 3GB (default).
1371 -extern unsigned long task_size;
1373 -#define TASK_SIZE (task_size)
1376 -#undef STACK_TOP_MAX
1378 -extern unsigned long stacksizelim;
1380 -#define STACK_ROOM (stacksizelim)
1381 -#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
1382 -#define STACK_TOP_MAX STACK_TOP
1384 -/* This decides where the kernel will search for a free chunk of vm
1385 - * space during mmap's.
1387 -#define TASK_UNMAPPED_BASE (0x40000000)
1389 -extern void start_thread(struct pt_regs *regs, unsigned long entry,
1390 - unsigned long stack);
1392 -struct cpuinfo_um {
1393 - unsigned long loops_per_jiffy;
1397 -extern struct cpuinfo_um boot_cpu_data;
1399 -#define my_cpu_data cpu_data[smp_processor_id()]
1402 -extern struct cpuinfo_um cpu_data[];
1403 -#define current_cpu_data cpu_data[smp_processor_id()]
1405 -#define cpu_data (&boot_cpu_data)
1406 -#define current_cpu_data boot_cpu_data
1410 -#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
1411 -extern unsigned long get_wchan(struct task_struct *p);
1414 --- a/arch/um/include/asm/ptrace-generic.h
1418 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1419 - * Licensed under the GPL
1422 -#ifndef __UM_PTRACE_GENERIC_H
1423 -#define __UM_PTRACE_GENERIC_H
1425 -#ifndef __ASSEMBLY__
1427 -#include <asm/ptrace-abi.h>
1428 -#include <sysdep/ptrace.h>
1431 - struct uml_pt_regs regs;
1434 -#define arch_has_single_step() (1)
1436 -#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
1438 -#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
1439 -#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
1441 -#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
1443 -#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
1445 -#define instruction_pointer(regs) PT_REGS_IP(regs)
1447 -struct task_struct;
1449 -extern long subarch_ptrace(struct task_struct *child, long request,
1450 - unsigned long addr, unsigned long data);
1451 -extern unsigned long getreg(struct task_struct *child, int regno);
1452 -extern int putreg(struct task_struct *child, int regno, unsigned long value);
1454 -extern int arch_copy_tls(struct task_struct *new);
1455 -extern void clear_flushed_tls(struct task_struct *task);
1456 -extern void syscall_trace_enter(struct pt_regs *regs);
1457 -extern void syscall_trace_leave(struct pt_regs *regs);
1462 --- a/arch/um/include/asm/setup.h
1465 -#ifndef SETUP_H_INCLUDED
1466 -#define SETUP_H_INCLUDED
1468 -/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
1469 - * command line, so this choice is ok.
1472 -#define COMMAND_LINE_SIZE 4096
1474 -#endif /* SETUP_H_INCLUDED */
1475 --- a/arch/um/include/asm/smp.h
1483 -#include <linux/bitops.h>
1484 -#include <asm/current.h>
1485 -#include <linux/cpumask.h>
1487 -#define raw_smp_processor_id() (current_thread->cpu)
1489 -#define cpu_logical_map(n) (n)
1490 -#define cpu_number_map(n) (n)
1491 -extern int hard_smp_processor_id(void);
1492 -#define NO_PROC_ID -1
1497 -static inline void smp_cpus_done(unsigned int maxcpus)
1501 -extern struct task_struct *idle_threads[NR_CPUS];
1505 -#define hard_smp_processor_id() 0
1510 --- a/arch/um/include/asm/sysrq.h
1513 -#ifndef __UM_SYSRQ_H
1514 -#define __UM_SYSRQ_H
1516 -struct task_struct;
1517 -extern void show_trace(struct task_struct* task, unsigned long *stack);
1520 --- a/arch/um/include/asm/thread_info.h
1524 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1525 - * Licensed under the GPL
1528 -#ifndef __UM_THREAD_INFO_H
1529 -#define __UM_THREAD_INFO_H
1531 -#ifndef __ASSEMBLY__
1533 -#include <asm/types.h>
1534 -#include <asm/page.h>
1535 -#include <asm/uaccess.h>
1537 -struct thread_info {
1538 - struct task_struct *task; /* main task structure */
1539 - struct exec_domain *exec_domain; /* execution domain */
1540 - unsigned long flags; /* low level flags */
1541 - __u32 cpu; /* current CPU */
1542 - int preempt_count; /* 0 => preemptable,
1544 - mm_segment_t addr_limit; /* thread address space:
1545 - 0-0xBFFFFFFF for user
1546 - 0-0xFFFFFFFF for kernel */
1547 - struct restart_block restart_block;
1548 - struct thread_info *real_thread; /* Points to non-IRQ stack */
1551 -#define INIT_THREAD_INFO(tsk) \
1554 - .exec_domain = &default_exec_domain, \
1557 - .preempt_count = INIT_PREEMPT_COUNT, \
1558 - .addr_limit = KERNEL_DS, \
1559 - .restart_block = { \
1560 - .fn = do_no_restart_syscall, \
1562 - .real_thread = NULL, \
1565 -#define init_thread_info (init_thread_union.thread_info)
1566 -#define init_stack (init_thread_union.stack)
1568 -#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
1569 -/* how to get the thread information struct from C */
1570 -static inline struct thread_info *current_thread_info(void)
1572 - struct thread_info *ti;
1573 - unsigned long mask = THREAD_SIZE - 1;
1576 - asm volatile ("" : "=r" (p) : "0" (&ti));
1577 - ti = (struct thread_info *) (((unsigned long)p) & ~mask);
1581 -#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
1585 -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
1586 -#define TIF_SIGPENDING 1 /* signal pending */
1587 -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
1588 -#define TIF_RESTART_BLOCK 4
1589 -#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
1590 -#define TIF_SYSCALL_AUDIT 6
1591 -#define TIF_RESTORE_SIGMASK 7
1592 -#define TIF_NOTIFY_RESUME 8
1594 -#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
1595 -#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
1596 -#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
1597 -#define _TIF_MEMDIE (1 << TIF_MEMDIE)
1598 -#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
1601 --- a/arch/um/include/asm/timex.h
1604 -#ifndef __UM_TIMEX_H
1605 -#define __UM_TIMEX_H
1607 -typedef unsigned long cycles_t;
1609 -static inline cycles_t get_cycles (void)
1614 -#define CLOCK_TICK_RATE (HZ)
1617 --- a/arch/um/include/asm/tlb.h
1623 -#include <linux/pagemap.h>
1624 -#include <linux/swap.h>
1625 -#include <asm/percpu.h>
1626 -#include <asm/pgalloc.h>
1627 -#include <asm/tlbflush.h>
1629 -#define tlb_start_vma(tlb, vma) do { } while (0)
1630 -#define tlb_end_vma(tlb, vma) do { } while (0)
1631 -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
1633 -/* struct mmu_gather is an opaque type used by the mm code for passing around
1634 - * any data needed by arch specific code for tlb_remove_page.
1636 -struct mmu_gather {
1637 - struct mm_struct *mm;
1638 - unsigned int need_flush; /* Really unmapped some ptes? */
1639 - unsigned long start;
1640 - unsigned long end;
1641 - unsigned int fullmm; /* non-zero means full mm flush */
1644 -static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
1645 - unsigned long address)
1647 - if (tlb->start > address)
1648 - tlb->start = address;
1649 - if (tlb->end < address + PAGE_SIZE)
1650 - tlb->end = address + PAGE_SIZE;
1653 -static inline void init_tlb_gather(struct mmu_gather *tlb)
1655 - tlb->need_flush = 0;
1657 - tlb->start = TASK_SIZE;
1660 - if (tlb->fullmm) {
1662 - tlb->end = TASK_SIZE;
1667 -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
1670 - tlb->start = start;
1672 - tlb->fullmm = !(start | (end+1));
1674 - init_tlb_gather(tlb);
1677 -extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
1678 - unsigned long end);
1681 -tlb_flush_mmu(struct mmu_gather *tlb)
1683 - if (!tlb->need_flush)
1686 - flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
1687 - init_tlb_gather(tlb);
1691 - * Called at the end of the shootdown operation to free up any resources
1692 - * that were required.
1695 -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
1697 - tlb_flush_mmu(tlb);
1699 - /* keep the page table cache within bounds */
1700 - check_pgt_cache();
1704 - * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
1705 - * while handling the additional races in SMP caused by other CPUs
1706 - * caching valid mappings in their TLBs.
1708 -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
1710 - tlb->need_flush = 1;
1711 - free_page_and_swap_cache(page);
1712 - return 1; /* avoid calling tlb_flush_mmu */
1715 -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
1717 - __tlb_remove_page(tlb, page);
1721 - * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
1723 - * Record the fact that pte's were really umapped in ->need_flush, so we can
1724 - * later optimise away the tlb invalidate. This helps when userspace is
1725 - * unmapping already-unmapped pages, which happens quite a lot.
1727 -#define tlb_remove_tlb_entry(tlb, ptep, address) \
1729 - tlb->need_flush = 1; \
1730 - __tlb_remove_tlb_entry(tlb, ptep, address); \
1733 -#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
1735 -#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
1737 -#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
1739 -#define tlb_migrate_finish(mm) do {} while (0)
1742 --- a/arch/um/include/asm/tlbflush.h
1746 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1747 - * Licensed under the GPL
1750 -#ifndef __UM_TLBFLUSH_H
1751 -#define __UM_TLBFLUSH_H
1753 -#include <linux/mm.h>
1758 - * - flush_tlb() flushes the current mm struct TLBs
1759 - * - flush_tlb_all() flushes all processes TLBs
1760 - * - flush_tlb_mm(mm) flushes the specified mm context TLB's
1761 - * - flush_tlb_page(vma, vmaddr) flushes one page
1762 - * - flush_tlb_kernel_vm() flushes the kernel vm area
1763 - * - flush_tlb_range(vma, start, end) flushes a range of pages
1766 -extern void flush_tlb_all(void);
1767 -extern void flush_tlb_mm(struct mm_struct *mm);
1768 -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
1769 - unsigned long end);
1770 -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
1771 -extern void flush_tlb_kernel_vm(void);
1772 -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
1773 -extern void __flush_tlb_one(unsigned long addr);
1776 --- a/arch/um/include/asm/uaccess.h
1780 - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
1781 - * Licensed under the GPL
1784 -#ifndef __UM_UACCESS_H
1785 -#define __UM_UACCESS_H
1787 -/* thread_info has a mm_segment_t in it, so put the definition up here */
1789 - unsigned long seg;
1792 -#include <linux/thread_info.h>
1793 -#include <linux/errno.h>
1794 -#include <asm/processor.h>
1795 -#include <asm/elf.h>
1797 -#define VERIFY_READ 0
1798 -#define VERIFY_WRITE 1
1801 - * The fs value determines whether argument validity checking should be
1802 - * performed or not. If get_fs() == USER_DS, checking is performed, with
1803 - * get_fs() == KERNEL_DS, checking is bypassed.
1805 - * For historical reasons, these macros are grossly misnamed.
1808 -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
1810 -#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
1811 -#define USER_DS MAKE_MM_SEG(TASK_SIZE)
1813 -#define get_ds() (KERNEL_DS)
1814 -#define get_fs() (current_thread_info()->addr_limit)
1815 -#define set_fs(x) (current_thread_info()->addr_limit = (x))
1817 -#define segment_eq(a, b) ((a).seg == (b).seg)
1819 -#define __under_task_size(addr, size) \
1820 - (((unsigned long) (addr) < TASK_SIZE) && \
1821 - (((unsigned long) (addr) + (size)) < TASK_SIZE))
1823 -#define __access_ok_vsyscall(type, addr, size) \
1824 - ((type == VERIFY_READ) && \
1825 - ((unsigned long) (addr) >= FIXADDR_USER_START) && \
1826 - ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
1827 - ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
1829 -#define __addr_range_nowrap(addr, size) \
1830 - ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
1832 -#define access_ok(type, addr, size) \
1833 - (__addr_range_nowrap(addr, size) && \
1834 - (__under_task_size(addr, size) || \
1835 - __access_ok_vsyscall(type, addr, size) || \
1836 - segment_eq(get_fs(), KERNEL_DS)))
1838 -extern int copy_from_user(void *to, const void __user *from, int n);
1839 -extern int copy_to_user(void __user *to, const void *from, int n);
1842 - * strncpy_from_user: - Copy a NUL terminated string from userspace.
1843 - * @dst: Destination address, in kernel space. This buffer must be at
1844 - * least @count bytes long.
1845 - * @src: Source address, in user space.
1846 - * @count: Maximum number of bytes to copy, including the trailing NUL.
1848 - * Copies a NUL-terminated string from userspace to kernel space.
1850 - * On success, returns the length of the string (not including the trailing
1853 - * If access to userspace fails, returns -EFAULT (some data may have been
1856 - * If @count is smaller than the length of the string, copies @count bytes
1857 - * and returns @count.
1860 -extern int strncpy_from_user(char *dst, const char __user *src, int count);
1863 - * __clear_user: - Zero a block of memory in user space, with less checking.
1864 - * @to: Destination address, in user space.
1865 - * @n: Number of bytes to zero.
1867 - * Zero a block of memory in user space. Caller must check
1868 - * the specified block with access_ok() before calling this function.
1870 - * Returns number of bytes that could not be cleared.
1871 - * On success, this will be zero.
1873 -extern int __clear_user(void __user *mem, int len);
1876 - * clear_user: - Zero a block of memory in user space.
1877 - * @to: Destination address, in user space.
1878 - * @n: Number of bytes to zero.
1880 - * Zero a block of memory in user space.
1882 - * Returns number of bytes that could not be cleared.
1883 - * On success, this will be zero.
1885 -extern int clear_user(void __user *mem, int len);
1888 - * strlen_user: - Get the size of a string in user space.
1889 - * @str: The string to measure.
1890 - * @n: The maximum valid length
1892 - * Get the size of a NUL-terminated string in user space.
1894 - * Returns the size of the string INCLUDING the terminating NUL.
1895 - * On exception, returns 0.
1896 - * If the string is too long, returns a value greater than @n.
1898 -extern int strnlen_user(const void __user *str, int len);
1900 -#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
1902 -#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
1904 -#define __copy_to_user_inatomic __copy_to_user
1905 -#define __copy_from_user_inatomic __copy_from_user
1907 -#define __get_user(x, ptr) \
1909 - const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
1910 - __typeof__(x) __private_val; \
1911 - int __private_ret = -EFAULT; \
1912 - (x) = (__typeof__(*(__private_ptr)))0; \
1913 - if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
1914 - sizeof(*(__private_ptr))) == 0) { \
1915 - (x) = (__typeof__(*(__private_ptr))) __private_val; \
1916 - __private_ret = 0; \
1921 -#define get_user(x, ptr) \
1923 - const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
1924 - (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
1925 - __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
1928 -#define __put_user(x, ptr) \
1930 - __typeof__(*(ptr)) __user *__private_ptr = ptr; \
1931 - __typeof__(*(__private_ptr)) __private_val; \
1932 - int __private_ret = -EFAULT; \
1933 - __private_val = (__typeof__(*(__private_ptr))) (x); \
1934 - if (__copy_to_user((__private_ptr), &__private_val, \
1935 - sizeof(*(__private_ptr))) == 0) { \
1936 - __private_ret = 0; \
1941 -#define put_user(x, ptr) \
1943 - __typeof__(*(ptr)) __user *private_ptr = (ptr); \
1944 - (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
1945 - __put_user(x, private_ptr) : -EFAULT); \
1948 -#define strlen_user(str) strnlen_user(str, ~0U >> 1)
1950 -struct exception_table_entry
1952 - unsigned long insn;
1953 - unsigned long fixup;
1958 +++ b/arch/um/include/uapi/asm/Kbuild
1960 +generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
1961 +generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
1962 +generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
1963 +generic-y += switch_to.h clkdev.h
1964 +generic-y += trace_clock.h
1965 +generic-y += preempt.h
1966 +generic-y += hash.h
1967 +generic-y += barrier.h
1969 +++ b/arch/um/include/uapi/asm/a.out-core.h
1971 +/* a.out coredump register dumper
1973 + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
1974 + * Written by David Howells (dhowells@redhat.com)
1976 + * This program is free software; you can redistribute it and/or
1977 + * modify it under the terms of the GNU General Public Licence
1978 + * as published by the Free Software Foundation; either version
1979 + * 2 of the Licence, or (at your option) any later version.
1982 +#ifndef __UM_A_OUT_CORE_H
1983 +#define __UM_A_OUT_CORE_H
1987 +#include <linux/user.h>
1990 + * fill in the user structure for an a.out core dump
1992 +static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
1996 +#endif /* __KERNEL__ */
1997 +#endif /* __UM_A_OUT_CORE_H */
1999 +++ b/arch/um/include/uapi/asm/bugs.h
2001 +#ifndef __UM_BUGS_H
2002 +#define __UM_BUGS_H
2004 +void check_bugs(void);
2008 +++ b/arch/um/include/uapi/asm/cache.h
2010 +#ifndef __UM_CACHE_H
2011 +#define __UM_CACHE_H
2014 +#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
2015 +# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
2016 +#elif defined(CONFIG_UML_X86) /* 64-bit */
2017 +# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
2019 +/* XXX: this was taken from x86, now it's completely random. Luckily only
2020 + * affects SMP padding. */
2021 +# define L1_CACHE_SHIFT 5
2024 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2028 +++ b/arch/um/include/uapi/asm/common.lds.S
2030 +#include <asm-generic/vmlinux.lds.h>
2032 + .fini : { *(.fini) } =0x9090
2034 + PROVIDE (etext = .);
2038 + PROVIDE (sdata = .);
2042 + .unprotected : { *(.unprotected) }
2044 + PROVIDE (_unprotected_end = .);
2047 + .note : { *(.note.*) }
2048 + EXCEPTION_TABLE(0)
2052 + .uml.setup.init : {
2053 + __uml_setup_start = .;
2054 + *(.uml.setup.init)
2055 + __uml_setup_end = .;
2058 + .uml.help.init : {
2059 + __uml_help_start = .;
2061 + __uml_help_end = .;
2064 + .uml.postsetup.init : {
2065 + __uml_postsetup_start = .;
2066 + *(.uml.postsetup.init)
2067 + __uml_postsetup_end = .;
2074 + PERCPU_SECTION(32)
2076 + .initcall.init : {
2080 + .con_initcall.init : {
2084 + .uml.initcall.init : {
2085 + __uml_initcall_start = .;
2086 + *(.uml.initcall.init)
2087 + __uml_initcall_end = .;
2093 + __exitcall_begin = .;
2095 + __exitcall_end = .;
2099 + __uml_exitcall_begin = .;
2100 + *(.uml.exitcall.exit)
2101 + __uml_exitcall_end = .;
2105 + .altinstructions : {
2106 + __alt_instructions = .;
2107 + *(.altinstructions)
2108 + __alt_instructions_end = .;
2110 + .altinstr_replacement : { *(.altinstr_replacement) }
2111 + /* .exit.text is discard at runtime, not link time, to deal with references
2112 + from .altinstructions and .eh_frame */
2113 + .exit.text : { *(.exit.text) }
2114 + .exit.data : { *(.exit.data) }
2116 + .preinit_array : {
2117 + __preinit_array_start = .;
2119 + __preinit_array_end = .;
2122 + __init_array_start = .;
2124 + __init_array_end = .;
2127 + __fini_array_start = .;
2129 + __fini_array_end = .;
2138 +++ b/arch/um/include/uapi/asm/dma.h
2143 +#include <asm/io.h>
2145 +extern unsigned long uml_physmem;
2147 +#define MAX_DMA_ADDRESS (uml_physmem)
2151 +++ b/arch/um/include/uapi/asm/fixmap.h
2153 +#ifndef __UM_FIXMAP_H
2154 +#define __UM_FIXMAP_H
2156 +#include <asm/processor.h>
2157 +#include <asm/kmap_types.h>
2158 +#include <asm/archparam.h>
2159 +#include <asm/page.h>
2160 +#include <linux/threads.h>
2163 + * Here we define all the compile-time 'special' virtual
2164 + * addresses. The point is to have a constant address at
2165 + * compile time, but to set the physical address only
2166 + * in the boot process. We allocate these special addresses
2167 + * from the end of virtual memory (0xfffff000) backwards.
2168 + * Also this lets us do fail-safe vmalloc(), we
2169 + * can guarantee that these special addresses and
2170 + * vmalloc()-ed addresses never overlap.
2172 + * these 'compile-time allocated' memory buffers are
2173 + * fixed-size 4k pages. (or larger if used with an increment
2174 + * highger than 1) use fixmap_set(idx,phys) to associate
2175 + * physical memory with fixmap indices.
2177 + * TLB entries of such buffers will not be flushed across
2182 + * on UP currently we will have no trace of the fixmap mechanizm,
2183 + * no page table allocations, etc. This might change in the
2184 + * future, say framebuffers for the console driver(s) could be
2187 +enum fixed_addresses {
2188 +#ifdef CONFIG_HIGHMEM
2189 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
2190 + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
2192 + __end_of_fixed_addresses
2195 +extern void __set_fixmap (enum fixed_addresses idx,
2196 + unsigned long phys, pgprot_t flags);
2199 + * used by vmalloc.c.
2201 + * Leave one empty page between vmalloc'ed areas and
2202 + * the start of the fixmap, and leave one page empty
2203 + * at the top of mem..
2206 +#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
2207 +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
2208 +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
2210 +#include <asm-generic/fixmap.h>
2214 +++ b/arch/um/include/uapi/asm/irq.h
2219 +#define TIMER_IRQ 0
2221 +#define CONSOLE_IRQ 2
2222 +#define CONSOLE_WRITE_IRQ 3
2224 +#define UM_ETH_IRQ 5
2226 +#define SSL_WRITE_IRQ 7
2227 +#define ACCEPT_IRQ 8
2228 +#define MCONSOLE_IRQ 9
2229 +#define WINCH_IRQ 10
2230 +#define SIGIO_WRITE_IRQ 11
2231 +#define TELNETD_IRQ 12
2232 +#define XTERM_IRQ 13
2233 +#define RANDOM_IRQ 14
2235 +#define LAST_IRQ RANDOM_IRQ
2236 +#define NR_IRQS (LAST_IRQ + 1)
2240 +++ b/arch/um/include/uapi/asm/irqflags.h
2242 +#ifndef __UM_IRQFLAGS_H
2243 +#define __UM_IRQFLAGS_H
2245 +extern int get_signals(void);
2246 +extern int set_signals(int enable);
2247 +extern void block_signals(void);
2248 +extern void unblock_signals(void);
2250 +static inline unsigned long arch_local_save_flags(void)
2252 + return get_signals();
2255 +static inline void arch_local_irq_restore(unsigned long flags)
2257 + set_signals(flags);
2260 +static inline void arch_local_irq_enable(void)
2262 + unblock_signals();
2265 +static inline void arch_local_irq_disable(void)
2270 +static inline unsigned long arch_local_irq_save(void)
2272 + unsigned long flags;
2273 + flags = arch_local_save_flags();
2274 + arch_local_irq_disable();
2278 +static inline bool arch_irqs_disabled(void)
2280 + return arch_local_save_flags() == 0;
2285 +++ b/arch/um/include/uapi/asm/kmap_types.h
2288 + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
2289 + * Licensed under the GPL
2292 +#ifndef __UM_KMAP_TYPES_H
2293 +#define __UM_KMAP_TYPES_H
2295 +/* No more #include "asm/arch/kmap_types.h" ! */
2297 +#define KM_TYPE_NR 14
2301 +++ b/arch/um/include/uapi/asm/kvm_para.h
2303 +#include <asm-generic/kvm_para.h>
2305 +++ b/arch/um/include/uapi/asm/mmu.h
2308 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2309 + * Licensed under the GPL
2312 +#ifndef __ARCH_UM_MMU_H
2313 +#define __ARCH_UM_MMU_H
2316 +#include <asm/mm_context.h>
2318 +typedef struct mm_context {
2320 + struct uml_arch_mm_context arch;
2321 + struct page *stub_pages[2];
2324 +extern void __switch_mm(struct mm_id * mm_idp);
2326 +/* Avoid tangled inclusion with asm/ldt.h */
2327 +extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
2328 +extern void free_ldt(struct mm_context *mm);
2332 +++ b/arch/um/include/uapi/asm/mmu_context.h
2335 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2336 + * Licensed under the GPL
2339 +#ifndef __UM_MMU_CONTEXT_H
2340 +#define __UM_MMU_CONTEXT_H
2342 +#include <linux/sched.h>
2343 +#include <asm/mmu.h>
2345 +extern void uml_setup_stubs(struct mm_struct *mm);
2346 +extern void arch_exit_mmap(struct mm_struct *mm);
2348 +#define deactivate_mm(tsk,mm) do { } while (0)
2350 +extern void force_flush_all(void);
2352 +static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
2355 + * This is called by fs/exec.c and sys_unshare()
2356 + * when the new ->mm is used for the first time.
2358 + __switch_mm(&new->context.id);
2359 + down_write(&new->mmap_sem);
2360 + uml_setup_stubs(new);
2361 + up_write(&new->mmap_sem);
2364 +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
2365 + struct task_struct *tsk)
2367 + unsigned cpu = smp_processor_id();
2370 + cpumask_clear_cpu(cpu, mm_cpumask(prev));
2371 + cpumask_set_cpu(cpu, mm_cpumask(next));
2372 + if(next != &init_mm)
2373 + __switch_mm(&next->context.id);
2377 +static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
2379 + uml_setup_stubs(mm);
2382 +static inline void enter_lazy_tlb(struct mm_struct *mm,
2383 + struct task_struct *tsk)
2387 +extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
2389 +extern void destroy_context(struct mm_struct *mm);
2393 +++ b/arch/um/include/uapi/asm/page.h
2396 + * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
2397 + * Copyright 2003 PathScale, Inc.
2398 + * Licensed under the GPL
2401 +#ifndef __UM_PAGE_H
2402 +#define __UM_PAGE_H
2404 +#include <linux/const.h>
2406 +/* PAGE_SHIFT determines the page size */
2407 +#define PAGE_SHIFT 12
2408 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
2409 +#define PAGE_MASK (~(PAGE_SIZE-1))
2411 +#ifndef __ASSEMBLY__
2415 +#include <linux/types.h>
2416 +#include <asm/vm-flags.h>
2419 + * These are used to make use of C type-checking..
2422 +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
2423 +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
2425 +#define clear_user_page(page, vaddr, pg) clear_page(page)
2426 +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
2428 +#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
2430 +typedef struct { unsigned long pte_low, pte_high; } pte_t;
2431 +typedef struct { unsigned long pmd; } pmd_t;
2432 +typedef struct { unsigned long pgd; } pgd_t;
2433 +#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
2435 +#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
2436 +#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
2437 +#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
2438 +#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
2440 + (to).pte_low = (from).pte_low; })
2441 +#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
2442 +#define pte_set_val(pte, phys, prot) \
2443 + ({ (pte).pte_high = (phys) >> 32; \
2444 + (pte).pte_low = (phys) | pgprot_val(prot); })
2446 +#define pmd_val(x) ((x).pmd)
2447 +#define __pmd(x) ((pmd_t) { (x) } )
2449 +typedef unsigned long long pfn_t;
2450 +typedef unsigned long long phys_t;
2454 +typedef struct { unsigned long pte; } pte_t;
2455 +typedef struct { unsigned long pgd; } pgd_t;
2457 +#ifdef CONFIG_3_LEVEL_PGTABLES
2458 +typedef struct { unsigned long pmd; } pmd_t;
2459 +#define pmd_val(x) ((x).pmd)
2460 +#define __pmd(x) ((pmd_t) { (x) } )
2463 +#define pte_val(x) ((x).pte)
2466 +#define pte_get_bits(p, bits) ((p).pte & (bits))
2467 +#define pte_set_bits(p, bits) ((p).pte |= (bits))
2468 +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
2469 +#define pte_copy(to, from) ((to).pte = (from).pte)
2470 +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
2471 +#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
2473 +typedef unsigned long pfn_t;
2474 +typedef unsigned long phys_t;
2478 +typedef struct { unsigned long pgprot; } pgprot_t;
2480 +typedef struct page *pgtable_t;
2482 +#define pgd_val(x) ((x).pgd)
2483 +#define pgprot_val(x) ((x).pgprot)
2485 +#define __pte(x) ((pte_t) { (x) } )
2486 +#define __pgd(x) ((pgd_t) { (x) } )
2487 +#define __pgprot(x) ((pgprot_t) { (x) } )
2489 +extern unsigned long uml_physmem;
2491 +#define PAGE_OFFSET (uml_physmem)
2492 +#define KERNELBASE PAGE_OFFSET
2494 +#define __va_space (8*1024*1024)
2498 +/* Cast to unsigned long before casting to void * to avoid a warning from
2499 + * mmap_kmem about cutting a long long down to a void *. Not sure that
2500 + * casting is the right thing, but 32-bit UML can't have 64-bit virtual
2503 +#define __pa(virt) to_phys((void *) (unsigned long) (virt))
2504 +#define __va(phys) to_virt((unsigned long) (phys))
2506 +#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
2507 +#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
2509 +#define pfn_valid(pfn) ((pfn) < max_mapnr)
2510 +#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
2512 +#include <asm-generic/memory_model.h>
2513 +#include <asm-generic/getorder.h>
2515 +#endif /* __ASSEMBLY__ */
2516 +#endif /* __UM_PAGE_H */
2518 +++ b/arch/um/include/uapi/asm/pgalloc.h
2521 + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
2522 + * Copyright 2003 PathScale, Inc.
2523 + * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
2524 + * Licensed under the GPL
2527 +#ifndef __UM_PGALLOC_H
2528 +#define __UM_PGALLOC_H
2530 +#include <linux/mm.h>
2532 +#define pmd_populate_kernel(mm, pmd, pte) \
2533 + set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
2535 +#define pmd_populate(mm, pmd, pte) \
2536 + set_pmd(pmd, __pmd(_PAGE_TABLE + \
2537 + ((unsigned long long)page_to_pfn(pte) << \
2538 + (unsigned long long) PAGE_SHIFT)))
2539 +#define pmd_pgtable(pmd) pmd_page(pmd)
2542 + * Allocate and free page tables.
2544 +extern pgd_t *pgd_alloc(struct mm_struct *);
2545 +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
2547 +extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
2548 +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
2550 +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2552 + free_page((unsigned long) pte);
2555 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2557 + pgtable_page_dtor(pte);
2561 +#define __pte_free_tlb(tlb,pte, address) \
2563 + pgtable_page_dtor(pte); \
2564 + tlb_remove_page((tlb),(pte)); \
2567 +#ifdef CONFIG_3_LEVEL_PGTABLES
2569 +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
2571 + free_page((unsigned long)pmd);
2574 +#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
2577 +#define check_pgt_cache() do { } while (0)
2582 +++ b/arch/um/include/uapi/asm/pgtable-2level.h
2585 + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
2586 + * Copyright 2003 PathScale, Inc.
2587 + * Derived from include/asm-i386/pgtable.h
2588 + * Licensed under the GPL
2591 +#ifndef __UM_PGTABLE_2LEVEL_H
2592 +#define __UM_PGTABLE_2LEVEL_H
2594 +#include <asm-generic/pgtable-nopmd.h>
2596 +/* PGDIR_SHIFT determines what a third-level page table entry can map */
2598 +#define PGDIR_SHIFT 22
2599 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
2600 +#define PGDIR_MASK (~(PGDIR_SIZE-1))
2603 + * entries per page directory level: the i386 is two-level, so
2604 + * we don't really have any PMD directory physically.
2606 +#define PTRS_PER_PTE 1024
2607 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
2608 +#define PTRS_PER_PGD 1024
2609 +#define FIRST_USER_ADDRESS 0
2611 +#define pte_ERROR(e) \
2612 + printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
2614 +#define pgd_ERROR(e) \
2615 + printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
2618 +static inline int pgd_newpage(pgd_t pgd) { return 0; }
2619 +static inline void pgd_mkuptodate(pgd_t pgd) { }
2621 +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
2623 +#define pte_pfn(x) phys_to_pfn(pte_val(x))
2624 +#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
2625 +#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
2628 + * Bits 0 through 4 are taken
2630 +#define PTE_FILE_MAX_BITS 27
2632 +#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
2634 +#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
2638 +++ b/arch/um/include/uapi/asm/pgtable-3level.h
2641 + * Copyright 2003 PathScale Inc
2642 + * Derived from include/asm-i386/pgtable.h
2643 + * Licensed under the GPL
2646 +#ifndef __UM_PGTABLE_3LEVEL_H
2647 +#define __UM_PGTABLE_3LEVEL_H
2649 +#include <asm-generic/pgtable-nopud.h>
2651 +/* PGDIR_SHIFT determines what a third-level page table entry can map */
2653 +#ifdef CONFIG_64BIT
2654 +#define PGDIR_SHIFT 30
2656 +#define PGDIR_SHIFT 31
2658 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
2659 +#define PGDIR_MASK (~(PGDIR_SIZE-1))
2661 +/* PMD_SHIFT determines the size of the area a second-level page table can
2665 +#define PMD_SHIFT 21
2666 +#define PMD_SIZE (1UL << PMD_SHIFT)
2667 +#define PMD_MASK (~(PMD_SIZE-1))
2670 + * entries per page directory level
2673 +#define PTRS_PER_PTE 512
2674 +#ifdef CONFIG_64BIT
2675 +#define PTRS_PER_PMD 512
2676 +#define PTRS_PER_PGD 512
2678 +#define PTRS_PER_PMD 1024
2679 +#define PTRS_PER_PGD 1024
2682 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
2683 +#define FIRST_USER_ADDRESS 0
2685 +#define pte_ERROR(e) \
2686 + printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2688 +#define pmd_ERROR(e) \
2689 + printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2691 +#define pgd_ERROR(e) \
2692 + printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2695 +#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
2696 +#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
2697 +#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
2698 +#define pud_populate(mm, pud, pmd) \
2699 + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
2701 +#ifdef CONFIG_64BIT
2702 +#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
2704 +#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
2707 +static inline int pgd_newpage(pgd_t pgd)
2709 + return(pgd_val(pgd) & _PAGE_NEWPAGE);
2712 +static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
2714 +#ifdef CONFIG_64BIT
2715 +#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
2717 +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
2721 +extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
2723 +static inline void pud_clear (pud_t *pud)
2725 + set_pud(pud, __pud(_PAGE_NEWPAGE));
2728 +#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
2729 +#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
2731 +/* Find an entry in the second-level page table.. */
2732 +#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
2733 + pmd_index(address))
2735 +static inline unsigned long pte_pfn(pte_t pte)
2737 + return phys_to_pfn(pte_val(pte));
2740 +static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
2743 + phys_t phys = pfn_to_phys(page_nr);
2745 + pte_set_val(pte, phys, pgprot);
2749 +static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
2751 + return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
2755 + * Bits 0 through 3 are taken in the low part of the pte,
2756 + * put the 32 bits of offset into the high part.
2758 +#define PTE_FILE_MAX_BITS 32
2760 +#ifdef CONFIG_64BIT
2762 +#define pte_to_pgoff(p) ((p).pte >> 32)
2764 +#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
2768 +#define pte_to_pgoff(pte) ((pte).pte_high)
2770 +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
2777 +++ b/arch/um/include/uapi/asm/pgtable.h
2780 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2781 + * Copyright 2003 PathScale, Inc.
2782 + * Derived from include/asm-i386/pgtable.h
2783 + * Licensed under the GPL
2786 +#ifndef __UM_PGTABLE_H
2787 +#define __UM_PGTABLE_H
2789 +#include <asm/fixmap.h>
2791 +#define _PAGE_PRESENT 0x001
2792 +#define _PAGE_NEWPAGE 0x002
2793 +#define _PAGE_NEWPROT 0x004
2794 +#define _PAGE_RW 0x020
2795 +#define _PAGE_USER 0x040
2796 +#define _PAGE_ACCESSED 0x080
2797 +#define _PAGE_DIRTY 0x100
2798 +/* If _PAGE_PRESENT is clear, we use these: */
2799 +#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
2800 +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
2801 + pte_present gives true */
2803 +#ifdef CONFIG_3_LEVEL_PGTABLES
2804 +#include <asm/pgtable-3level.h>
2806 +#include <asm/pgtable-2level.h>
2809 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
2811 +/* zero page used for uninitialized stuff */
2812 +extern unsigned long *empty_zero_page;
2814 +#define pgtable_cache_init() do ; while (0)
2816 +/* Just any arbitrary offset to the start of the vmalloc VM area: the
2817 + * current 8MB value just means that there will be a 8MB "hole" after the
2818 + * physical memory until the kernel virtual memory starts. That means that
2819 + * any out-of-bounds memory accesses will hopefully be caught.
2820 + * The vmalloc() routines leaves a hole of 4kB between each vmalloced
2821 + * area for the same reason. ;)
2824 +extern unsigned long end_iomem;
2826 +#define VMALLOC_OFFSET (__va_space)
2827 +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
2828 +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
2829 +#ifdef CONFIG_HIGHMEM
2830 +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
2832 +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
2834 +#define MODULES_VADDR VMALLOC_START
2835 +#define MODULES_END VMALLOC_END
2836 +#define MODULES_LEN (MODULES_VADDR - MODULES_END)
2838 +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
2839 +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
2840 +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
2841 +#define __PAGE_KERNEL_EXEC \
2842 + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
2843 +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
2844 +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
2845 +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
2846 +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
2847 +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
2848 +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
2851 + * The i386 can't do page protection for execute, and considers that the same
2853 + * Also, write permissions imply read permissions. This is the closest we can
2856 +#define __P000 PAGE_NONE
2857 +#define __P001 PAGE_READONLY
2858 +#define __P010 PAGE_COPY
2859 +#define __P011 PAGE_COPY
2860 +#define __P100 PAGE_READONLY
2861 +#define __P101 PAGE_READONLY
2862 +#define __P110 PAGE_COPY
2863 +#define __P111 PAGE_COPY
2865 +#define __S000 PAGE_NONE
2866 +#define __S001 PAGE_READONLY
2867 +#define __S010 PAGE_SHARED
2868 +#define __S011 PAGE_SHARED
2869 +#define __S100 PAGE_READONLY
2870 +#define __S101 PAGE_READONLY
2871 +#define __S110 PAGE_SHARED
2872 +#define __S111 PAGE_SHARED
2875 + * ZERO_PAGE is a global shared page that is always zero: used
2876 + * for zero-mapped memory areas etc..
2878 +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
2880 +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
2882 +#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
2883 +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
2885 +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
2886 +#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
2888 +#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
2889 +#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
2891 +#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
2892 +#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
2894 +#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
2896 +#define pte_page(x) pfn_to_page(pte_pfn(x))
2898 +#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
2901 + * =================================
2902 + * Flags checking section.
2903 + * =================================
2906 +static inline int pte_none(pte_t pte)
2908 + return pte_is_zero(pte);
2912 + * The following only work if pte_present() is true.
2913 + * Undefined behaviour if not..
2915 +static inline int pte_read(pte_t pte)
2917 + return((pte_get_bits(pte, _PAGE_USER)) &&
2918 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2921 +static inline int pte_exec(pte_t pte){
2922 + return((pte_get_bits(pte, _PAGE_USER)) &&
2923 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2926 +static inline int pte_write(pte_t pte)
2928 + return((pte_get_bits(pte, _PAGE_RW)) &&
2929 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2933 + * The following only works if pte_present() is not true.
2935 +static inline int pte_file(pte_t pte)
2937 + return pte_get_bits(pte, _PAGE_FILE);
2940 +static inline int pte_dirty(pte_t pte)
2942 + return pte_get_bits(pte, _PAGE_DIRTY);
2945 +static inline int pte_young(pte_t pte)
2947 + return pte_get_bits(pte, _PAGE_ACCESSED);
2950 +static inline int pte_newpage(pte_t pte)
2952 + return pte_get_bits(pte, _PAGE_NEWPAGE);
2955 +static inline int pte_newprot(pte_t pte)
2957 + return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
2960 +static inline int pte_special(pte_t pte)
2966 + * =================================
2967 + * Flags setting section.
2968 + * =================================
2971 +static inline pte_t pte_mknewprot(pte_t pte)
2973 + pte_set_bits(pte, _PAGE_NEWPROT);
2977 +static inline pte_t pte_mkclean(pte_t pte)
2979 + pte_clear_bits(pte, _PAGE_DIRTY);
2983 +static inline pte_t pte_mkold(pte_t pte)
2985 + pte_clear_bits(pte, _PAGE_ACCESSED);
2989 +static inline pte_t pte_wrprotect(pte_t pte)
2991 + pte_clear_bits(pte, _PAGE_RW);
2992 + return(pte_mknewprot(pte));
2995 +static inline pte_t pte_mkread(pte_t pte)
2997 + pte_set_bits(pte, _PAGE_USER);
2998 + return(pte_mknewprot(pte));
3001 +static inline pte_t pte_mkdirty(pte_t pte)
3003 + pte_set_bits(pte, _PAGE_DIRTY);
3007 +static inline pte_t pte_mkyoung(pte_t pte)
3009 + pte_set_bits(pte, _PAGE_ACCESSED);
3013 +static inline pte_t pte_mkwrite(pte_t pte)
3015 + pte_set_bits(pte, _PAGE_RW);
3016 + return(pte_mknewprot(pte));
3019 +static inline pte_t pte_mkuptodate(pte_t pte)
3021 + pte_clear_bits(pte, _PAGE_NEWPAGE);
3022 + if(pte_present(pte))
3023 + pte_clear_bits(pte, _PAGE_NEWPROT);
3027 +static inline pte_t pte_mknewpage(pte_t pte)
3029 + pte_set_bits(pte, _PAGE_NEWPAGE);
3033 +static inline pte_t pte_mkspecial(pte_t pte)
3038 +static inline void set_pte(pte_t *pteptr, pte_t pteval)
3040 + pte_copy(*pteptr, pteval);
3042 + /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
3043 + * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
3047 + *pteptr = pte_mknewpage(*pteptr);
3048 + if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
3050 +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
3052 +#define __HAVE_ARCH_PTE_SAME
3053 +static inline int pte_same(pte_t pte_a, pte_t pte_b)
3055 + return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
3059 + * Conversion functions: convert a page and protection to a page entry,
3060 + * and a page entry and page directory to the page they refer to.
3063 +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
3064 +#define __virt_to_page(virt) phys_to_page(__pa(virt))
3065 +#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
3066 +#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
3068 +#define mk_pte(page, pgprot) \
3071 + pte_set_val(pte, page_to_phys(page), (pgprot)); \
3072 + if (pte_present(pte)) \
3073 + pte_mknewprot(pte_mknewpage(pte)); \
3076 +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
3078 + pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
3083 + * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
3085 + * this macro returns the index of the entry in the pgd page which would
3086 + * control the given virtual address
3088 +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
3091 + * pgd_offset() returns a (pgd_t *)
3092 + * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
3094 +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
3097 + * a shortcut which implies the use of the kernel's pgd, instead
3100 +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
3103 + * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
3105 + * this macro returns the index of the entry in the pmd page which would
3106 + * control the given virtual address
3108 +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
3109 +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
3111 +#define pmd_page_vaddr(pmd) \
3112 + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
3115 + * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
3117 + * this macro returns the index of the entry in the pte page which would
3118 + * control the given virtual address
3120 +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
3121 +#define pte_offset_kernel(dir, address) \
3122 + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
3123 +#define pte_offset_map(dir, address) \
3124 + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
3125 +#define pte_unmap(pte) do { } while (0)
3128 +extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
3130 +#define update_mmu_cache(vma,address,ptep) do ; while (0)
3132 +/* Encode and de-code a swap entry */
3133 +#define __swp_type(x) (((x).val >> 5) & 0x1f)
3134 +#define __swp_offset(x) ((x).val >> 11)
3136 +#define __swp_entry(type, offset) \
3137 + ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
3138 +#define __pte_to_swp_entry(pte) \
3139 + ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
3140 +#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
3142 +#define kern_addr_valid(addr) (1)
3144 +#include <asm-generic/pgtable.h>
3146 +/* Clear a kernel PTE and flush it from the TLB */
3147 +#define kpte_clear_flush(ptep, vaddr) \
3149 + pte_clear(&init_mm, (vaddr), (ptep)); \
3150 + __flush_tlb_one((vaddr)); \
3155 +++ b/arch/um/include/uapi/asm/processor-generic.h
3158 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3159 + * Licensed under the GPL
3162 +#ifndef __UM_PROCESSOR_GENERIC_H
3163 +#define __UM_PROCESSOR_GENERIC_H
3167 +struct task_struct;
3169 +#include <asm/ptrace.h>
3170 +#include <registers.h>
3171 +#include <sysdep/archsetjmp.h>
3173 +#include <linux/prefetch.h>
3177 +struct thread_struct {
3178 + struct pt_regs regs;
3179 + struct pt_regs *segv_regs;
3180 + int singlestep_syscall;
3182 + jmp_buf *fault_catcher;
3183 + struct task_struct *prev_sched;
3184 + struct arch_thread arch;
3185 + jmp_buf switch_buf;
3193 + int (*proc)(void *);
3197 + void (*proc)(void *);
3204 +#define INIT_THREAD \
3206 + .regs = EMPTY_REGS, \
3207 + .fault_addr = NULL, \
3208 + .prev_sched = NULL, \
3209 + .arch = INIT_ARCH_THREAD, \
3210 + .request = { 0 } \
3213 +static inline void release_thread(struct task_struct *task)
3217 +extern unsigned long thread_saved_pc(struct task_struct *t);
3219 +static inline void mm_copy_segments(struct mm_struct *from_mm,
3220 + struct mm_struct *new_mm)
3224 +#define init_stack (init_thread_union.stack)
3227 + * User space process size: 3GB (default).
3229 +extern unsigned long task_size;
3231 +#define TASK_SIZE (task_size)
3234 +#undef STACK_TOP_MAX
3236 +extern unsigned long stacksizelim;
3238 +#define STACK_ROOM (stacksizelim)
3239 +#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
3240 +#define STACK_TOP_MAX STACK_TOP
3242 +/* This decides where the kernel will search for a free chunk of vm
3243 + * space during mmap's.
3245 +#define TASK_UNMAPPED_BASE (0x40000000)
3247 +extern void start_thread(struct pt_regs *regs, unsigned long entry,
3248 + unsigned long stack);
3250 +struct cpuinfo_um {
3251 + unsigned long loops_per_jiffy;
3255 +extern struct cpuinfo_um boot_cpu_data;
3257 +#define my_cpu_data cpu_data[smp_processor_id()]
3260 +extern struct cpuinfo_um cpu_data[];
3261 +#define current_cpu_data cpu_data[smp_processor_id()]
3263 +#define cpu_data (&boot_cpu_data)
3264 +#define current_cpu_data boot_cpu_data
3268 +#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
3269 +extern unsigned long get_wchan(struct task_struct *p);
3273 +++ b/arch/um/include/uapi/asm/ptrace-generic.h
3276 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3277 + * Licensed under the GPL
3280 +#ifndef __UM_PTRACE_GENERIC_H
3281 +#define __UM_PTRACE_GENERIC_H
3283 +#ifndef __ASSEMBLY__
3285 +#include <asm/ptrace-abi.h>
3286 +#include <sysdep/ptrace.h>
3289 + struct uml_pt_regs regs;
3292 +#define arch_has_single_step() (1)
3294 +#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
3296 +#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
3297 +#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
3299 +#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
3301 +#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
3303 +#define instruction_pointer(regs) PT_REGS_IP(regs)
3305 +struct task_struct;
3307 +extern long subarch_ptrace(struct task_struct *child, long request,
3308 + unsigned long addr, unsigned long data);
3309 +extern unsigned long getreg(struct task_struct *child, int regno);
3310 +extern int putreg(struct task_struct *child, int regno, unsigned long value);
3312 +extern int arch_copy_tls(struct task_struct *new);
3313 +extern void clear_flushed_tls(struct task_struct *task);
3314 +extern void syscall_trace_enter(struct pt_regs *regs);
3315 +extern void syscall_trace_leave(struct pt_regs *regs);
3321 +++ b/arch/um/include/uapi/asm/setup.h
3323 +#ifndef SETUP_H_INCLUDED
3324 +#define SETUP_H_INCLUDED
3326 +/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
3327 + * command line, so this choice is ok.
3330 +#define COMMAND_LINE_SIZE 4096
3332 +#endif /* SETUP_H_INCLUDED */
3334 +++ b/arch/um/include/uapi/asm/smp.h
3341 +#include <linux/bitops.h>
3342 +#include <asm/current.h>
3343 +#include <linux/cpumask.h>
3345 +#define raw_smp_processor_id() (current_thread->cpu)
3347 +#define cpu_logical_map(n) (n)
3348 +#define cpu_number_map(n) (n)
3349 +extern int hard_smp_processor_id(void);
3350 +#define NO_PROC_ID -1
3355 +static inline void smp_cpus_done(unsigned int maxcpus)
3359 +extern struct task_struct *idle_threads[NR_CPUS];
3363 +#define hard_smp_processor_id() 0
3369 +++ b/arch/um/include/uapi/asm/sysrq.h
3371 +#ifndef __UM_SYSRQ_H
3372 +#define __UM_SYSRQ_H
3374 +struct task_struct;
3375 +extern void show_trace(struct task_struct* task, unsigned long *stack);
3379 +++ b/arch/um/include/uapi/asm/thread_info.h
3382 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3383 + * Licensed under the GPL
3386 +#ifndef __UM_THREAD_INFO_H
3387 +#define __UM_THREAD_INFO_H
3389 +#ifndef __ASSEMBLY__
3391 +#include <asm/types.h>
3392 +#include <asm/page.h>
3393 +#include <asm/uaccess.h>
3395 +struct thread_info {
3396 + struct task_struct *task; /* main task structure */
3397 + struct exec_domain *exec_domain; /* execution domain */
3398 + unsigned long flags; /* low level flags */
3399 + __u32 cpu; /* current CPU */
3400 + int preempt_count; /* 0 => preemptable,
3402 + mm_segment_t addr_limit; /* thread address space:
3403 + 0-0xBFFFFFFF for user
3404 + 0-0xFFFFFFFF for kernel */
3405 + struct restart_block restart_block;
3406 + struct thread_info *real_thread; /* Points to non-IRQ stack */
3409 +#define INIT_THREAD_INFO(tsk) \
3412 + .exec_domain = &default_exec_domain, \
3415 + .preempt_count = INIT_PREEMPT_COUNT, \
3416 + .addr_limit = KERNEL_DS, \
3417 + .restart_block = { \
3418 + .fn = do_no_restart_syscall, \
3420 + .real_thread = NULL, \
3423 +#define init_thread_info (init_thread_union.thread_info)
3424 +#define init_stack (init_thread_union.stack)
3426 +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
3427 +/* how to get the thread information struct from C */
3428 +static inline struct thread_info *current_thread_info(void)
3430 + struct thread_info *ti;
3431 + unsigned long mask = THREAD_SIZE - 1;
3434 + asm volatile ("" : "=r" (p) : "0" (&ti));
3435 + ti = (struct thread_info *) (((unsigned long)p) & ~mask);
3439 +#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
3443 +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
3444 +#define TIF_SIGPENDING 1 /* signal pending */
3445 +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
3446 +#define TIF_RESTART_BLOCK 4
3447 +#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
3448 +#define TIF_SYSCALL_AUDIT 6
3449 +#define TIF_RESTORE_SIGMASK 7
3450 +#define TIF_NOTIFY_RESUME 8
3452 +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
3453 +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
3454 +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
3455 +#define _TIF_MEMDIE (1 << TIF_MEMDIE)
3456 +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
3460 +++ b/arch/um/include/uapi/asm/timex.h
3462 +#ifndef __UM_TIMEX_H
3463 +#define __UM_TIMEX_H
3465 +typedef unsigned long cycles_t;
3467 +static inline cycles_t get_cycles (void)
3472 +#define CLOCK_TICK_RATE (HZ)
3476 +++ b/arch/um/include/uapi/asm/tlb.h
3481 +#include <linux/pagemap.h>
3482 +#include <linux/swap.h>
3483 +#include <asm/percpu.h>
3484 +#include <asm/pgalloc.h>
3485 +#include <asm/tlbflush.h>
3487 +#define tlb_start_vma(tlb, vma) do { } while (0)
3488 +#define tlb_end_vma(tlb, vma) do { } while (0)
3489 +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
3491 +/* struct mmu_gather is an opaque type used by the mm code for passing around
3492 + * any data needed by arch specific code for tlb_remove_page.
3494 +struct mmu_gather {
3495 + struct mm_struct *mm;
3496 + unsigned int need_flush; /* Really unmapped some ptes? */
3497 + unsigned long start;
3498 + unsigned long end;
3499 + unsigned int fullmm; /* non-zero means full mm flush */
3502 +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
3503 + unsigned long address)
3505 + if (tlb->start > address)
3506 + tlb->start = address;
3507 + if (tlb->end < address + PAGE_SIZE)
3508 + tlb->end = address + PAGE_SIZE;
3511 +static inline void init_tlb_gather(struct mmu_gather *tlb)
3513 + tlb->need_flush = 0;
3515 + tlb->start = TASK_SIZE;
3518 + if (tlb->fullmm) {
3520 + tlb->end = TASK_SIZE;
3525 +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
3528 + tlb->start = start;
3530 + tlb->fullmm = !(start | (end+1));
3532 + init_tlb_gather(tlb);
3535 +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
3536 + unsigned long end);
3539 +tlb_flush_mmu(struct mmu_gather *tlb)
3541 + if (!tlb->need_flush)
3544 + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
3545 + init_tlb_gather(tlb);
3549 + * Called at the end of the shootdown operation to free up any resources
3550 + * that were required.
3553 +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
3555 + tlb_flush_mmu(tlb);
3557 + /* keep the page table cache within bounds */
3558 + check_pgt_cache();
3562 + * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
3563 + * while handling the additional races in SMP caused by other CPUs
3564 + * caching valid mappings in their TLBs.
3566 +static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
3568 + tlb->need_flush = 1;
3569 + free_page_and_swap_cache(page);
3570 + return 1; /* avoid calling tlb_flush_mmu */
3573 +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
3575 + __tlb_remove_page(tlb, page);
3579 + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
3581 + * Record the fact that pte's were really umapped in ->need_flush, so we can
3582 + * later optimise away the tlb invalidate. This helps when userspace is
3583 + * unmapping already-unmapped pages, which happens quite a lot.
3585 +#define tlb_remove_tlb_entry(tlb, ptep, address) \
3587 + tlb->need_flush = 1; \
3588 + __tlb_remove_tlb_entry(tlb, ptep, address); \
3591 +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
3593 +#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
3595 +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
3597 +#define tlb_migrate_finish(mm) do {} while (0)
3601 +++ b/arch/um/include/uapi/asm/tlbflush.h
3604 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3605 + * Licensed under the GPL
3608 +#ifndef __UM_TLBFLUSH_H
3609 +#define __UM_TLBFLUSH_H
3611 +#include <linux/mm.h>
3616 + * - flush_tlb() flushes the current mm struct TLBs
3617 + * - flush_tlb_all() flushes all processes TLBs
3618 + * - flush_tlb_mm(mm) flushes the specified mm context TLB's
3619 + * - flush_tlb_page(vma, vmaddr) flushes one page
3620 + * - flush_tlb_kernel_vm() flushes the kernel vm area
3621 + * - flush_tlb_range(vma, start, end) flushes a range of pages
3624 +extern void flush_tlb_all(void);
3625 +extern void flush_tlb_mm(struct mm_struct *mm);
3626 +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
3627 + unsigned long end);
3628 +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
3629 +extern void flush_tlb_kernel_vm(void);
3630 +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
3631 +extern void __flush_tlb_one(unsigned long addr);
3635 +++ b/arch/um/include/uapi/asm/uaccess.h
3638 + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3639 + * Licensed under the GPL
3642 +#ifndef __UM_UACCESS_H
3643 +#define __UM_UACCESS_H
3645 +/* thread_info has a mm_segment_t in it, so put the definition up here */
3647 + unsigned long seg;
3650 +#include <linux/thread_info.h>
3651 +#include <linux/errno.h>
3652 +#include <asm/processor.h>
3653 +#include <asm/elf.h>
3655 +#define VERIFY_READ 0
3656 +#define VERIFY_WRITE 1
3659 + * The fs value determines whether argument validity checking should be
3660 + * performed or not. If get_fs() == USER_DS, checking is performed, with
3661 + * get_fs() == KERNEL_DS, checking is bypassed.
3663 + * For historical reasons, these macros are grossly misnamed.
3666 +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
3668 +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
3669 +#define USER_DS MAKE_MM_SEG(TASK_SIZE)
3671 +#define get_ds() (KERNEL_DS)
3672 +#define get_fs() (current_thread_info()->addr_limit)
3673 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
3675 +#define segment_eq(a, b) ((a).seg == (b).seg)
3677 +#define __under_task_size(addr, size) \
3678 + (((unsigned long) (addr) < TASK_SIZE) && \
3679 + (((unsigned long) (addr) + (size)) < TASK_SIZE))
3681 +#define __access_ok_vsyscall(type, addr, size) \
3682 + ((type == VERIFY_READ) && \
3683 + ((unsigned long) (addr) >= FIXADDR_USER_START) && \
3684 + ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
3685 + ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
3687 +#define __addr_range_nowrap(addr, size) \
3688 + ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
3690 +#define access_ok(type, addr, size) \
3691 + (__addr_range_nowrap(addr, size) && \
3692 + (__under_task_size(addr, size) || \
3693 + __access_ok_vsyscall(type, addr, size) || \
3694 + segment_eq(get_fs(), KERNEL_DS)))
3696 +extern int copy_from_user(void *to, const void __user *from, int n);
3697 +extern int copy_to_user(void __user *to, const void *from, int n);
3700 + * strncpy_from_user: - Copy a NUL terminated string from userspace.
3701 + * @dst: Destination address, in kernel space. This buffer must be at
3702 + * least @count bytes long.
3703 + * @src: Source address, in user space.
3704 + * @count: Maximum number of bytes to copy, including the trailing NUL.
3706 + * Copies a NUL-terminated string from userspace to kernel space.
3708 + * On success, returns the length of the string (not including the trailing
3711 + * If access to userspace fails, returns -EFAULT (some data may have been
3714 + * If @count is smaller than the length of the string, copies @count bytes
3715 + * and returns @count.
3718 +extern int strncpy_from_user(char *dst, const char __user *src, int count);
3721 + * __clear_user: - Zero a block of memory in user space, with less checking.
3722 + * @to: Destination address, in user space.
3723 + * @n: Number of bytes to zero.
3725 + * Zero a block of memory in user space. Caller must check
3726 + * the specified block with access_ok() before calling this function.
3728 + * Returns number of bytes that could not be cleared.
3729 + * On success, this will be zero.
3731 +extern int __clear_user(void __user *mem, int len);
3734 + * clear_user: - Zero a block of memory in user space.
3735 + * @to: Destination address, in user space.
3736 + * @n: Number of bytes to zero.
3738 + * Zero a block of memory in user space.
3740 + * Returns number of bytes that could not be cleared.
3741 + * On success, this will be zero.
3743 +extern int clear_user(void __user *mem, int len);
3746 + * strlen_user: - Get the size of a string in user space.
3747 + * @str: The string to measure.
3748 + * @n: The maximum valid length
3750 + * Get the size of a NUL-terminated string in user space.
3752 + * Returns the size of the string INCLUDING the terminating NUL.
3753 + * On exception, returns 0.
3754 + * If the string is too long, returns a value greater than @n.
3756 +extern int strnlen_user(const void __user *str, int len);
3758 +#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
3760 +#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
3762 +#define __copy_to_user_inatomic __copy_to_user
3763 +#define __copy_from_user_inatomic __copy_from_user
3765 +#define __get_user(x, ptr) \
3767 + const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
3768 + __typeof__(x) __private_val; \
3769 + int __private_ret = -EFAULT; \
3770 + (x) = (__typeof__(*(__private_ptr)))0; \
3771 + if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
3772 + sizeof(*(__private_ptr))) == 0) { \
3773 + (x) = (__typeof__(*(__private_ptr))) __private_val; \
3774 + __private_ret = 0; \
3779 +#define get_user(x, ptr) \
3781 + const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
3782 + (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
3783 + __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
3786 +#define __put_user(x, ptr) \
3788 + __typeof__(*(ptr)) __user *__private_ptr = ptr; \
3789 + __typeof__(*(__private_ptr)) __private_val; \
3790 + int __private_ret = -EFAULT; \
3791 + __private_val = (__typeof__(*(__private_ptr))) (x); \
3792 + if (__copy_to_user((__private_ptr), &__private_val, \
3793 + sizeof(*(__private_ptr))) == 0) { \
3794 + __private_ret = 0; \
3799 +#define put_user(x, ptr) \
3801 + __typeof__(*(ptr)) __user *private_ptr = (ptr); \
3802 + (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
3803 + __put_user(x, private_ptr) : -EFAULT); \
3806 +#define strlen_user(str) strnlen_user(str, ~0U >> 1)
3808 +struct exception_table_entry
3810 + unsigned long insn;
3811 + unsigned long fixup;