uml: bump to 3.18
[openwrt/svn-archive/archive.git] target/linux/uml/patches-3.18/001-fix_make_headers_install.patch
1 From 5eac4d66049ab7d14a2b7311610c8cb85a2c1bf1 Mon Sep 17 00:00:00 2001
2 From: Nicolas Thill <nico@openwrt.org>
3 Date: Fri, 20 Mar 2015 00:31:06 +0100
4 Subject: [PATCH] UM: fix make headers_install after UAPI header installation
5
6 Signed-off-by: Nicolas Thill <nico@openwrt.org>
7 ---
8 From faec6b6c2cc0219e74569c13f581fc11d8f3fc57 Mon Sep 17 00:00:00 2001
9 From: Florian Fainelli <florian@openwrt.org>
10 Date: Sun, 17 Mar 2013 20:12:10 +0100
11 Subject: [PATCH] UM: fix make headers_install after UAPI header installation
12
13 Commit 10b63956 (UAPI: Plumb the UAPI Kbuilds into the user
14 header installation and checking) breaks UML make headers_install with
15 the following:
16
17 $ ARCH=um make headers_install
18 CHK include/generated/uapi/linux/version.h
19 UPD include/generated/uapi/linux/version.h
20 HOSTCC scripts/basic/fixdep
21 WRAP arch/um/include/generated/asm/bug.h
22 [snip]
23 WRAP arch/um/include/generated/asm/trace_clock.h
24 SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_32.h
25 SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_64.h
26 SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_x32.h
27 SYSTBL arch/x86/syscalls/../include/generated/asm/syscalls_32.h
28 HOSTCC scripts/unifdef
29 Makefile:912: *** Headers not exportable for the um architecture. Stop.
30 zsh: exit 2 ARCH=um make headers_install
31
32 The reason is that the top-level Makefile performs the following
33 check:
34 $(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
35 $(error Headers not exportable for the $(SRCARCH) architecture))
36
37 We end up in the else branch of the $(if) statement because UML still
38 keeps its Kbuild at the old path, arch/um/include/asm/Kbuild. This patch
39 fixes the issue by moving the header files to arch/um/include/uapi/asm/,
40 thus making headers_install (and other make targets that check for uapi)
41 succeed.
42
43 Signed-off-by: Florian Fainelli <florian@openwrt.org>
44 ---
45 Richard, this has been broken from 3.7 onwards; if you want me to send
46 you separate patches for 3.7 and 3.8, let me know. Thanks!
47
48
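As a rough illustration of what the change amounts to, the header move and the
subsequent check can be reproduced with the shell steps below. This is a sketch
only: the kernel tree location is assumed, and the patch itself carries the
actual per-file moves and contents.

    $ cd linux-3.18                                    # assumed source tree location
    $ mkdir -p arch/um/include/uapi/asm
    $ git mv arch/um/include/asm/* arch/um/include/uapi/asm/   # the patch does this file by file
    $ ARCH=um make headers_install                     # the uapi Kbuild check should now pass
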
49 --- a/arch/um/include/asm/Kbuild
50 +++ /dev/null
51 @@ -1,30 +0,0 @@
52 -generic-y += barrier.h
53 -generic-y += bug.h
54 -generic-y += clkdev.h
55 -generic-y += cputime.h
56 -generic-y += current.h
57 -generic-y += delay.h
58 -generic-y += device.h
59 -generic-y += emergency-restart.h
60 -generic-y += exec.h
61 -generic-y += ftrace.h
62 -generic-y += futex.h
63 -generic-y += hardirq.h
64 -generic-y += hash.h
65 -generic-y += hw_irq.h
66 -generic-y += io.h
67 -generic-y += irq_regs.h
68 -generic-y += irq_work.h
69 -generic-y += kdebug.h
70 -generic-y += mcs_spinlock.h
71 -generic-y += mutex.h
72 -generic-y += param.h
73 -generic-y += pci.h
74 -generic-y += percpu.h
75 -generic-y += preempt.h
76 -generic-y += scatterlist.h
77 -generic-y += sections.h
78 -generic-y += switch_to.h
79 -generic-y += topology.h
80 -generic-y += trace_clock.h
81 -generic-y += xor.h
82 --- a/arch/um/include/asm/a.out-core.h
83 +++ /dev/null
84 @@ -1,27 +0,0 @@
85 -/* a.out coredump register dumper
86 - *
87 - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
88 - * Written by David Howells (dhowells@redhat.com)
89 - *
90 - * This program is free software; you can redistribute it and/or
91 - * modify it under the terms of the GNU General Public Licence
92 - * as published by the Free Software Foundation; either version
93 - * 2 of the Licence, or (at your option) any later version.
94 - */
95 -
96 -#ifndef __UM_A_OUT_CORE_H
97 -#define __UM_A_OUT_CORE_H
98 -
99 -#ifdef __KERNEL__
100 -
101 -#include <linux/user.h>
102 -
103 -/*
104 - * fill in the user structure for an a.out core dump
105 - */
106 -static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
107 -{
108 -}
109 -
110 -#endif /* __KERNEL__ */
111 -#endif /* __UM_A_OUT_CORE_H */
112 --- a/arch/um/include/asm/bugs.h
113 +++ /dev/null
114 @@ -1,6 +0,0 @@
115 -#ifndef __UM_BUGS_H
116 -#define __UM_BUGS_H
117 -
118 -void check_bugs(void);
119 -
120 -#endif
121 --- a/arch/um/include/asm/cache.h
122 +++ /dev/null
123 @@ -1,17 +0,0 @@
124 -#ifndef __UM_CACHE_H
125 -#define __UM_CACHE_H
126 -
127 -
128 -#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
129 -# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
130 -#elif defined(CONFIG_UML_X86) /* 64-bit */
131 -# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
132 -#else
133 -/* XXX: this was taken from x86, now it's completely random. Luckily only
134 - * affects SMP padding. */
135 -# define L1_CACHE_SHIFT 5
136 -#endif
137 -
138 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
139 -
140 -#endif
141 --- a/arch/um/include/asm/common.lds.S
142 +++ /dev/null
143 @@ -1,107 +0,0 @@
144 -#include <asm-generic/vmlinux.lds.h>
145 -
146 - .fini : { *(.fini) } =0x9090
147 - _etext = .;
148 - PROVIDE (etext = .);
149 -
150 - . = ALIGN(4096);
151 - _sdata = .;
152 - PROVIDE (sdata = .);
153 -
154 - RODATA
155 -
156 - .unprotected : { *(.unprotected) }
157 - . = ALIGN(4096);
158 - PROVIDE (_unprotected_end = .);
159 -
160 - . = ALIGN(4096);
161 - .note : { *(.note.*) }
162 - EXCEPTION_TABLE(0)
163 -
164 - BUG_TABLE
165 -
166 - .uml.setup.init : {
167 - __uml_setup_start = .;
168 - *(.uml.setup.init)
169 - __uml_setup_end = .;
170 - }
171 -
172 - .uml.help.init : {
173 - __uml_help_start = .;
174 - *(.uml.help.init)
175 - __uml_help_end = .;
176 - }
177 -
178 - .uml.postsetup.init : {
179 - __uml_postsetup_start = .;
180 - *(.uml.postsetup.init)
181 - __uml_postsetup_end = .;
182 - }
183 -
184 - .init.setup : {
185 - INIT_SETUP(0)
186 - }
187 -
188 - PERCPU_SECTION(32)
189 -
190 - .initcall.init : {
191 - INIT_CALLS
192 - }
193 -
194 - .con_initcall.init : {
195 - CON_INITCALL
196 - }
197 -
198 - .uml.initcall.init : {
199 - __uml_initcall_start = .;
200 - *(.uml.initcall.init)
201 - __uml_initcall_end = .;
202 - }
203 -
204 - SECURITY_INIT
205 -
206 - .exitcall : {
207 - __exitcall_begin = .;
208 - *(.exitcall.exit)
209 - __exitcall_end = .;
210 - }
211 -
212 - .uml.exitcall : {
213 - __uml_exitcall_begin = .;
214 - *(.uml.exitcall.exit)
215 - __uml_exitcall_end = .;
216 - }
217 -
218 - . = ALIGN(4);
219 - .altinstructions : {
220 - __alt_instructions = .;
221 - *(.altinstructions)
222 - __alt_instructions_end = .;
223 - }
224 - .altinstr_replacement : { *(.altinstr_replacement) }
225 - /* .exit.text is discard at runtime, not link time, to deal with references
226 - from .altinstructions and .eh_frame */
227 - .exit.text : { *(.exit.text) }
228 - .exit.data : { *(.exit.data) }
229 -
230 - .preinit_array : {
231 - __preinit_array_start = .;
232 - *(.preinit_array)
233 - __preinit_array_end = .;
234 - }
235 - .init_array : {
236 - __init_array_start = .;
237 - *(.init_array)
238 - __init_array_end = .;
239 - }
240 - .fini_array : {
241 - __fini_array_start = .;
242 - *(.fini_array)
243 - __fini_array_end = .;
244 - }
245 -
246 - . = ALIGN(4096);
247 - .init.ramfs : {
248 - INIT_RAM_FS
249 - }
250 -
251 --- a/arch/um/include/asm/dma.h
252 +++ /dev/null
253 @@ -1,10 +0,0 @@
254 -#ifndef __UM_DMA_H
255 -#define __UM_DMA_H
256 -
257 -#include <asm/io.h>
258 -
259 -extern unsigned long uml_physmem;
260 -
261 -#define MAX_DMA_ADDRESS (uml_physmem)
262 -
263 -#endif
264 --- a/arch/um/include/asm/fixmap.h
265 +++ /dev/null
266 @@ -1,60 +0,0 @@
267 -#ifndef __UM_FIXMAP_H
268 -#define __UM_FIXMAP_H
269 -
270 -#include <asm/processor.h>
271 -#include <asm/kmap_types.h>
272 -#include <asm/archparam.h>
273 -#include <asm/page.h>
274 -#include <linux/threads.h>
275 -
276 -/*
277 - * Here we define all the compile-time 'special' virtual
278 - * addresses. The point is to have a constant address at
279 - * compile time, but to set the physical address only
280 - * in the boot process. We allocate these special addresses
281 - * from the end of virtual memory (0xfffff000) backwards.
282 - * Also this lets us do fail-safe vmalloc(), we
283 - * can guarantee that these special addresses and
284 - * vmalloc()-ed addresses never overlap.
285 - *
286 - * these 'compile-time allocated' memory buffers are
287 - * fixed-size 4k pages. (or larger if used with an increment
288 - * highger than 1) use fixmap_set(idx,phys) to associate
289 - * physical memory with fixmap indices.
290 - *
291 - * TLB entries of such buffers will not be flushed across
292 - * task switches.
293 - */
294 -
295 -/*
296 - * on UP currently we will have no trace of the fixmap mechanizm,
297 - * no page table allocations, etc. This might change in the
298 - * future, say framebuffers for the console driver(s) could be
299 - * fix-mapped?
300 - */
301 -enum fixed_addresses {
302 -#ifdef CONFIG_HIGHMEM
303 - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
304 - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
305 -#endif
306 - __end_of_fixed_addresses
307 -};
308 -
309 -extern void __set_fixmap (enum fixed_addresses idx,
310 - unsigned long phys, pgprot_t flags);
311 -
312 -/*
313 - * used by vmalloc.c.
314 - *
315 - * Leave one empty page between vmalloc'ed areas and
316 - * the start of the fixmap, and leave one page empty
317 - * at the top of mem..
318 - */
319 -
320 -#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
321 -#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
322 -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
323 -
324 -#include <asm-generic/fixmap.h>
325 -
326 -#endif
327 --- a/arch/um/include/asm/irq.h
328 +++ /dev/null
329 @@ -1,23 +0,0 @@
330 -#ifndef __UM_IRQ_H
331 -#define __UM_IRQ_H
332 -
333 -#define TIMER_IRQ 0
334 -#define UMN_IRQ 1
335 -#define CONSOLE_IRQ 2
336 -#define CONSOLE_WRITE_IRQ 3
337 -#define UBD_IRQ 4
338 -#define UM_ETH_IRQ 5
339 -#define SSL_IRQ 6
340 -#define SSL_WRITE_IRQ 7
341 -#define ACCEPT_IRQ 8
342 -#define MCONSOLE_IRQ 9
343 -#define WINCH_IRQ 10
344 -#define SIGIO_WRITE_IRQ 11
345 -#define TELNETD_IRQ 12
346 -#define XTERM_IRQ 13
347 -#define RANDOM_IRQ 14
348 -
349 -#define LAST_IRQ RANDOM_IRQ
350 -#define NR_IRQS (LAST_IRQ + 1)
351 -
352 -#endif
353 --- a/arch/um/include/asm/irqflags.h
354 +++ /dev/null
355 @@ -1,42 +0,0 @@
356 -#ifndef __UM_IRQFLAGS_H
357 -#define __UM_IRQFLAGS_H
358 -
359 -extern int get_signals(void);
360 -extern int set_signals(int enable);
361 -extern void block_signals(void);
362 -extern void unblock_signals(void);
363 -
364 -static inline unsigned long arch_local_save_flags(void)
365 -{
366 - return get_signals();
367 -}
368 -
369 -static inline void arch_local_irq_restore(unsigned long flags)
370 -{
371 - set_signals(flags);
372 -}
373 -
374 -static inline void arch_local_irq_enable(void)
375 -{
376 - unblock_signals();
377 -}
378 -
379 -static inline void arch_local_irq_disable(void)
380 -{
381 - block_signals();
382 -}
383 -
384 -static inline unsigned long arch_local_irq_save(void)
385 -{
386 - unsigned long flags;
387 - flags = arch_local_save_flags();
388 - arch_local_irq_disable();
389 - return flags;
390 -}
391 -
392 -static inline bool arch_irqs_disabled(void)
393 -{
394 - return arch_local_save_flags() == 0;
395 -}
396 -
397 -#endif
398 --- a/arch/um/include/asm/kmap_types.h
399 +++ /dev/null
400 @@ -1,13 +0,0 @@
401 -/*
402 - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
403 - * Licensed under the GPL
404 - */
405 -
406 -#ifndef __UM_KMAP_TYPES_H
407 -#define __UM_KMAP_TYPES_H
408 -
409 -/* No more #include "asm/arch/kmap_types.h" ! */
410 -
411 -#define KM_TYPE_NR 14
412 -
413 -#endif
414 --- a/arch/um/include/asm/kvm_para.h
415 +++ /dev/null
416 @@ -1 +0,0 @@
417 -#include <asm-generic/kvm_para.h>
418 --- a/arch/um/include/asm/mmu.h
419 +++ /dev/null
420 @@ -1,24 +0,0 @@
421 -/*
422 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
423 - * Licensed under the GPL
424 - */
425 -
426 -#ifndef __ARCH_UM_MMU_H
427 -#define __ARCH_UM_MMU_H
428 -
429 -#include <mm_id.h>
430 -#include <asm/mm_context.h>
431 -
432 -typedef struct mm_context {
433 - struct mm_id id;
434 - struct uml_arch_mm_context arch;
435 - struct page *stub_pages[2];
436 -} mm_context_t;
437 -
438 -extern void __switch_mm(struct mm_id * mm_idp);
439 -
440 -/* Avoid tangled inclusion with asm/ldt.h */
441 -extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
442 -extern void free_ldt(struct mm_context *mm);
443 -
444 -#endif
445 --- a/arch/um/include/asm/mmu_context.h
446 +++ /dev/null
447 @@ -1,58 +0,0 @@
448 -/*
449 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
450 - * Licensed under the GPL
451 - */
452 -
453 -#ifndef __UM_MMU_CONTEXT_H
454 -#define __UM_MMU_CONTEXT_H
455 -
456 -#include <linux/sched.h>
457 -#include <asm/mmu.h>
458 -
459 -extern void uml_setup_stubs(struct mm_struct *mm);
460 -extern void arch_exit_mmap(struct mm_struct *mm);
461 -
462 -#define deactivate_mm(tsk,mm) do { } while (0)
463 -
464 -extern void force_flush_all(void);
465 -
466 -static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
467 -{
468 - /*
469 - * This is called by fs/exec.c and sys_unshare()
470 - * when the new ->mm is used for the first time.
471 - */
472 - __switch_mm(&new->context.id);
473 - down_write(&new->mmap_sem);
474 - uml_setup_stubs(new);
475 - up_write(&new->mmap_sem);
476 -}
477 -
478 -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
479 - struct task_struct *tsk)
480 -{
481 - unsigned cpu = smp_processor_id();
482 -
483 - if(prev != next){
484 - cpumask_clear_cpu(cpu, mm_cpumask(prev));
485 - cpumask_set_cpu(cpu, mm_cpumask(next));
486 - if(next != &init_mm)
487 - __switch_mm(&next->context.id);
488 - }
489 -}
490 -
491 -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
492 -{
493 - uml_setup_stubs(mm);
494 -}
495 -
496 -static inline void enter_lazy_tlb(struct mm_struct *mm,
497 - struct task_struct *tsk)
498 -{
499 -}
500 -
501 -extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
502 -
503 -extern void destroy_context(struct mm_struct *mm);
504 -
505 -#endif
506 --- a/arch/um/include/asm/page.h
507 +++ /dev/null
508 @@ -1,127 +0,0 @@
509 -/*
510 - * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
511 - * Copyright 2003 PathScale, Inc.
512 - * Licensed under the GPL
513 - */
514 -
515 -#ifndef __UM_PAGE_H
516 -#define __UM_PAGE_H
517 -
518 -#include <linux/const.h>
519 -
520 -/* PAGE_SHIFT determines the page size */
521 -#define PAGE_SHIFT 12
522 -#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
523 -#define PAGE_MASK (~(PAGE_SIZE-1))
524 -
525 -#ifndef __ASSEMBLY__
526 -
527 -struct page;
528 -
529 -#include <linux/types.h>
530 -#include <asm/vm-flags.h>
531 -
532 -/*
533 - * These are used to make use of C type-checking..
534 - */
535 -
536 -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
537 -#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
538 -
539 -#define clear_user_page(page, vaddr, pg) clear_page(page)
540 -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
541 -
542 -#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
543 -
544 -typedef struct { unsigned long pte_low, pte_high; } pte_t;
545 -typedef struct { unsigned long pmd; } pmd_t;
546 -typedef struct { unsigned long pgd; } pgd_t;
547 -#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
548 -
549 -#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
550 -#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
551 -#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
552 -#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
553 - smp_wmb(); \
554 - (to).pte_low = (from).pte_low; })
555 -#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
556 -#define pte_set_val(pte, phys, prot) \
557 - ({ (pte).pte_high = (phys) >> 32; \
558 - (pte).pte_low = (phys) | pgprot_val(prot); })
559 -
560 -#define pmd_val(x) ((x).pmd)
561 -#define __pmd(x) ((pmd_t) { (x) } )
562 -
563 -typedef unsigned long long pfn_t;
564 -typedef unsigned long long phys_t;
565 -
566 -#else
567 -
568 -typedef struct { unsigned long pte; } pte_t;
569 -typedef struct { unsigned long pgd; } pgd_t;
570 -
571 -#ifdef CONFIG_3_LEVEL_PGTABLES
572 -typedef struct { unsigned long pmd; } pmd_t;
573 -#define pmd_val(x) ((x).pmd)
574 -#define __pmd(x) ((pmd_t) { (x) } )
575 -#endif
576 -
577 -#define pte_val(x) ((x).pte)
578 -
579 -
580 -#define pte_get_bits(p, bits) ((p).pte & (bits))
581 -#define pte_set_bits(p, bits) ((p).pte |= (bits))
582 -#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
583 -#define pte_copy(to, from) ((to).pte = (from).pte)
584 -#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
585 -#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
586 -
587 -typedef unsigned long pfn_t;
588 -typedef unsigned long phys_t;
589 -
590 -#endif
591 -
592 -typedef struct { unsigned long pgprot; } pgprot_t;
593 -
594 -typedef struct page *pgtable_t;
595 -
596 -#define pgd_val(x) ((x).pgd)
597 -#define pgprot_val(x) ((x).pgprot)
598 -
599 -#define __pte(x) ((pte_t) { (x) } )
600 -#define __pgd(x) ((pgd_t) { (x) } )
601 -#define __pgprot(x) ((pgprot_t) { (x) } )
602 -
603 -extern unsigned long uml_physmem;
604 -
605 -#define PAGE_OFFSET (uml_physmem)
606 -#define KERNELBASE PAGE_OFFSET
607 -
608 -#define __va_space (8*1024*1024)
609 -
610 -#include <mem.h>
611 -
612 -/* Cast to unsigned long before casting to void * to avoid a warning from
613 - * mmap_kmem about cutting a long long down to a void *. Not sure that
614 - * casting is the right thing, but 32-bit UML can't have 64-bit virtual
615 - * addresses
616 - */
617 -#define __pa(virt) to_phys((void *) (unsigned long) (virt))
618 -#define __va(phys) to_virt((unsigned long) (phys))
619 -
620 -#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
621 -#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
622 -
623 -#define pfn_valid(pfn) ((pfn) < max_mapnr)
624 -#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
625 -
626 -#include <asm-generic/memory_model.h>
627 -#include <asm-generic/getorder.h>
628 -
629 -#endif /* __ASSEMBLY__ */
630 -
631 -#ifdef CONFIG_X86_32
632 -#define __HAVE_ARCH_GATE_AREA 1
633 -#endif
634 -
635 -#endif /* __UM_PAGE_H */
636 --- a/arch/um/include/asm/pgalloc.h
637 +++ /dev/null
638 @@ -1,61 +0,0 @@
639 -/*
640 - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
641 - * Copyright 2003 PathScale, Inc.
642 - * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
643 - * Licensed under the GPL
644 - */
645 -
646 -#ifndef __UM_PGALLOC_H
647 -#define __UM_PGALLOC_H
648 -
649 -#include <linux/mm.h>
650 -
651 -#define pmd_populate_kernel(mm, pmd, pte) \
652 - set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
653 -
654 -#define pmd_populate(mm, pmd, pte) \
655 - set_pmd(pmd, __pmd(_PAGE_TABLE + \
656 - ((unsigned long long)page_to_pfn(pte) << \
657 - (unsigned long long) PAGE_SHIFT)))
658 -#define pmd_pgtable(pmd) pmd_page(pmd)
659 -
660 -/*
661 - * Allocate and free page tables.
662 - */
663 -extern pgd_t *pgd_alloc(struct mm_struct *);
664 -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
665 -
666 -extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
667 -extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
668 -
669 -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
670 -{
671 - free_page((unsigned long) pte);
672 -}
673 -
674 -static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
675 -{
676 - pgtable_page_dtor(pte);
677 - __free_page(pte);
678 -}
679 -
680 -#define __pte_free_tlb(tlb,pte, address) \
681 -do { \
682 - pgtable_page_dtor(pte); \
683 - tlb_remove_page((tlb),(pte)); \
684 -} while (0)
685 -
686 -#ifdef CONFIG_3_LEVEL_PGTABLES
687 -
688 -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
689 -{
690 - free_page((unsigned long)pmd);
691 -}
692 -
693 -#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
694 -#endif
695 -
696 -#define check_pgt_cache() do { } while (0)
697 -
698 -#endif
699 -
700 --- a/arch/um/include/asm/pgtable-2level.h
701 +++ /dev/null
702 @@ -1,53 +0,0 @@
703 -/*
704 - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
705 - * Copyright 2003 PathScale, Inc.
706 - * Derived from include/asm-i386/pgtable.h
707 - * Licensed under the GPL
708 - */
709 -
710 -#ifndef __UM_PGTABLE_2LEVEL_H
711 -#define __UM_PGTABLE_2LEVEL_H
712 -
713 -#include <asm-generic/pgtable-nopmd.h>
714 -
715 -/* PGDIR_SHIFT determines what a third-level page table entry can map */
716 -
717 -#define PGDIR_SHIFT 22
718 -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
719 -#define PGDIR_MASK (~(PGDIR_SIZE-1))
720 -
721 -/*
722 - * entries per page directory level: the i386 is two-level, so
723 - * we don't really have any PMD directory physically.
724 - */
725 -#define PTRS_PER_PTE 1024
726 -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
727 -#define PTRS_PER_PGD 1024
728 -#define FIRST_USER_ADDRESS 0
729 -
730 -#define pte_ERROR(e) \
731 - printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
732 - pte_val(e))
733 -#define pgd_ERROR(e) \
734 - printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
735 - pgd_val(e))
736 -
737 -static inline int pgd_newpage(pgd_t pgd) { return 0; }
738 -static inline void pgd_mkuptodate(pgd_t pgd) { }
739 -
740 -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
741 -
742 -#define pte_pfn(x) phys_to_pfn(pte_val(x))
743 -#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
744 -#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
745 -
746 -/*
747 - * Bits 0 through 4 are taken
748 - */
749 -#define PTE_FILE_MAX_BITS 27
750 -
751 -#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
752 -
753 -#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
754 -
755 -#endif
756 --- a/arch/um/include/asm/pgtable-3level.h
757 +++ /dev/null
758 @@ -1,136 +0,0 @@
759 -/*
760 - * Copyright 2003 PathScale Inc
761 - * Derived from include/asm-i386/pgtable.h
762 - * Licensed under the GPL
763 - */
764 -
765 -#ifndef __UM_PGTABLE_3LEVEL_H
766 -#define __UM_PGTABLE_3LEVEL_H
767 -
768 -#include <asm-generic/pgtable-nopud.h>
769 -
770 -/* PGDIR_SHIFT determines what a third-level page table entry can map */
771 -
772 -#ifdef CONFIG_64BIT
773 -#define PGDIR_SHIFT 30
774 -#else
775 -#define PGDIR_SHIFT 31
776 -#endif
777 -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
778 -#define PGDIR_MASK (~(PGDIR_SIZE-1))
779 -
780 -/* PMD_SHIFT determines the size of the area a second-level page table can
781 - * map
782 - */
783 -
784 -#define PMD_SHIFT 21
785 -#define PMD_SIZE (1UL << PMD_SHIFT)
786 -#define PMD_MASK (~(PMD_SIZE-1))
787 -
788 -/*
789 - * entries per page directory level
790 - */
791 -
792 -#define PTRS_PER_PTE 512
793 -#ifdef CONFIG_64BIT
794 -#define PTRS_PER_PMD 512
795 -#define PTRS_PER_PGD 512
796 -#else
797 -#define PTRS_PER_PMD 1024
798 -#define PTRS_PER_PGD 1024
799 -#endif
800 -
801 -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
802 -#define FIRST_USER_ADDRESS 0
803 -
804 -#define pte_ERROR(e) \
805 - printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
806 - pte_val(e))
807 -#define pmd_ERROR(e) \
808 - printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
809 - pmd_val(e))
810 -#define pgd_ERROR(e) \
811 - printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
812 - pgd_val(e))
813 -
814 -#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
815 -#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
816 -#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
817 -#define pud_populate(mm, pud, pmd) \
818 - set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
819 -
820 -#ifdef CONFIG_64BIT
821 -#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
822 -#else
823 -#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
824 -#endif
825 -
826 -static inline int pgd_newpage(pgd_t pgd)
827 -{
828 - return(pgd_val(pgd) & _PAGE_NEWPAGE);
829 -}
830 -
831 -static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
832 -
833 -#ifdef CONFIG_64BIT
834 -#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
835 -#else
836 -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
837 -#endif
838 -
839 -struct mm_struct;
840 -extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
841 -
842 -static inline void pud_clear (pud_t *pud)
843 -{
844 - set_pud(pud, __pud(_PAGE_NEWPAGE));
845 -}
846 -
847 -#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
848 -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
849 -
850 -/* Find an entry in the second-level page table.. */
851 -#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
852 - pmd_index(address))
853 -
854 -static inline unsigned long pte_pfn(pte_t pte)
855 -{
856 - return phys_to_pfn(pte_val(pte));
857 -}
858 -
859 -static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
860 -{
861 - pte_t pte;
862 - phys_t phys = pfn_to_phys(page_nr);
863 -
864 - pte_set_val(pte, phys, pgprot);
865 - return pte;
866 -}
867 -
868 -static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
869 -{
870 - return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
871 -}
872 -
873 -/*
874 - * Bits 0 through 3 are taken in the low part of the pte,
875 - * put the 32 bits of offset into the high part.
876 - */
877 -#define PTE_FILE_MAX_BITS 32
878 -
879 -#ifdef CONFIG_64BIT
880 -
881 -#define pte_to_pgoff(p) ((p).pte >> 32)
882 -
883 -#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
884 -
885 -#else
886 -
887 -#define pte_to_pgoff(pte) ((pte).pte_high)
888 -
889 -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
890 -
891 -#endif
892 -
893 -#endif
894 -
895 --- a/arch/um/include/asm/pgtable.h
896 +++ /dev/null
897 @@ -1,375 +0,0 @@
898 -/*
899 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
900 - * Copyright 2003 PathScale, Inc.
901 - * Derived from include/asm-i386/pgtable.h
902 - * Licensed under the GPL
903 - */
904 -
905 -#ifndef __UM_PGTABLE_H
906 -#define __UM_PGTABLE_H
907 -
908 -#include <asm/fixmap.h>
909 -
910 -#define _PAGE_PRESENT 0x001
911 -#define _PAGE_NEWPAGE 0x002
912 -#define _PAGE_NEWPROT 0x004
913 -#define _PAGE_RW 0x020
914 -#define _PAGE_USER 0x040
915 -#define _PAGE_ACCESSED 0x080
916 -#define _PAGE_DIRTY 0x100
917 -/* If _PAGE_PRESENT is clear, we use these: */
918 -#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
919 -#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
920 - pte_present gives true */
921 -
922 -#ifdef CONFIG_3_LEVEL_PGTABLES
923 -#include <asm/pgtable-3level.h>
924 -#else
925 -#include <asm/pgtable-2level.h>
926 -#endif
927 -
928 -extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
929 -
930 -/* zero page used for uninitialized stuff */
931 -extern unsigned long *empty_zero_page;
932 -
933 -#define pgtable_cache_init() do ; while (0)
934 -
935 -/* Just any arbitrary offset to the start of the vmalloc VM area: the
936 - * current 8MB value just means that there will be a 8MB "hole" after the
937 - * physical memory until the kernel virtual memory starts. That means that
938 - * any out-of-bounds memory accesses will hopefully be caught.
939 - * The vmalloc() routines leaves a hole of 4kB between each vmalloced
940 - * area for the same reason. ;)
941 - */
942 -
943 -extern unsigned long end_iomem;
944 -
945 -#define VMALLOC_OFFSET (__va_space)
946 -#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
947 -#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
948 -#ifdef CONFIG_HIGHMEM
949 -# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
950 -#else
951 -# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
952 -#endif
953 -#define MODULES_VADDR VMALLOC_START
954 -#define MODULES_END VMALLOC_END
955 -#define MODULES_LEN (MODULES_VADDR - MODULES_END)
956 -
957 -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
958 -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
959 -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
960 -#define __PAGE_KERNEL_EXEC \
961 - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
962 -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
963 -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
964 -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
965 -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
966 -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
967 -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
968 -
969 -/*
970 - * The i386 can't do page protection for execute, and considers that the same
971 - * are read.
972 - * Also, write permissions imply read permissions. This is the closest we can
973 - * get..
974 - */
975 -#define __P000 PAGE_NONE
976 -#define __P001 PAGE_READONLY
977 -#define __P010 PAGE_COPY
978 -#define __P011 PAGE_COPY
979 -#define __P100 PAGE_READONLY
980 -#define __P101 PAGE_READONLY
981 -#define __P110 PAGE_COPY
982 -#define __P111 PAGE_COPY
983 -
984 -#define __S000 PAGE_NONE
985 -#define __S001 PAGE_READONLY
986 -#define __S010 PAGE_SHARED
987 -#define __S011 PAGE_SHARED
988 -#define __S100 PAGE_READONLY
989 -#define __S101 PAGE_READONLY
990 -#define __S110 PAGE_SHARED
991 -#define __S111 PAGE_SHARED
992 -
993 -/*
994 - * ZERO_PAGE is a global shared page that is always zero: used
995 - * for zero-mapped memory areas etc..
996 - */
997 -#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
998 -
999 -#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
1000 -
1001 -#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
1002 -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
1003 -
1004 -#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
1005 -#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
1006 -
1007 -#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
1008 -#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
1009 -
1010 -#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
1011 -#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
1012 -
1013 -#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
1014 -
1015 -#define pte_page(x) pfn_to_page(pte_pfn(x))
1016 -
1017 -#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
1018 -
1019 -/*
1020 - * =================================
1021 - * Flags checking section.
1022 - * =================================
1023 - */
1024 -
1025 -static inline int pte_none(pte_t pte)
1026 -{
1027 - return pte_is_zero(pte);
1028 -}
1029 -
1030 -/*
1031 - * The following only work if pte_present() is true.
1032 - * Undefined behaviour if not..
1033 - */
1034 -static inline int pte_read(pte_t pte)
1035 -{
1036 - return((pte_get_bits(pte, _PAGE_USER)) &&
1037 - !(pte_get_bits(pte, _PAGE_PROTNONE)));
1038 -}
1039 -
1040 -static inline int pte_exec(pte_t pte){
1041 - return((pte_get_bits(pte, _PAGE_USER)) &&
1042 - !(pte_get_bits(pte, _PAGE_PROTNONE)));
1043 -}
1044 -
1045 -static inline int pte_write(pte_t pte)
1046 -{
1047 - return((pte_get_bits(pte, _PAGE_RW)) &&
1048 - !(pte_get_bits(pte, _PAGE_PROTNONE)));
1049 -}
1050 -
1051 -/*
1052 - * The following only works if pte_present() is not true.
1053 - */
1054 -static inline int pte_file(pte_t pte)
1055 -{
1056 - return pte_get_bits(pte, _PAGE_FILE);
1057 -}
1058 -
1059 -static inline int pte_dirty(pte_t pte)
1060 -{
1061 - return pte_get_bits(pte, _PAGE_DIRTY);
1062 -}
1063 -
1064 -static inline int pte_young(pte_t pte)
1065 -{
1066 - return pte_get_bits(pte, _PAGE_ACCESSED);
1067 -}
1068 -
1069 -static inline int pte_newpage(pte_t pte)
1070 -{
1071 - return pte_get_bits(pte, _PAGE_NEWPAGE);
1072 -}
1073 -
1074 -static inline int pte_newprot(pte_t pte)
1075 -{
1076 - return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
1077 -}
1078 -
1079 -static inline int pte_special(pte_t pte)
1080 -{
1081 - return 0;
1082 -}
1083 -
1084 -/*
1085 - * =================================
1086 - * Flags setting section.
1087 - * =================================
1088 - */
1089 -
1090 -static inline pte_t pte_mknewprot(pte_t pte)
1091 -{
1092 - pte_set_bits(pte, _PAGE_NEWPROT);
1093 - return(pte);
1094 -}
1095 -
1096 -static inline pte_t pte_mkclean(pte_t pte)
1097 -{
1098 - pte_clear_bits(pte, _PAGE_DIRTY);
1099 - return(pte);
1100 -}
1101 -
1102 -static inline pte_t pte_mkold(pte_t pte)
1103 -{
1104 - pte_clear_bits(pte, _PAGE_ACCESSED);
1105 - return(pte);
1106 -}
1107 -
1108 -static inline pte_t pte_wrprotect(pte_t pte)
1109 -{
1110 - pte_clear_bits(pte, _PAGE_RW);
1111 - return(pte_mknewprot(pte));
1112 -}
1113 -
1114 -static inline pte_t pte_mkread(pte_t pte)
1115 -{
1116 - pte_set_bits(pte, _PAGE_USER);
1117 - return(pte_mknewprot(pte));
1118 -}
1119 -
1120 -static inline pte_t pte_mkdirty(pte_t pte)
1121 -{
1122 - pte_set_bits(pte, _PAGE_DIRTY);
1123 - return(pte);
1124 -}
1125 -
1126 -static inline pte_t pte_mkyoung(pte_t pte)
1127 -{
1128 - pte_set_bits(pte, _PAGE_ACCESSED);
1129 - return(pte);
1130 -}
1131 -
1132 -static inline pte_t pte_mkwrite(pte_t pte)
1133 -{
1134 - pte_set_bits(pte, _PAGE_RW);
1135 - return(pte_mknewprot(pte));
1136 -}
1137 -
1138 -static inline pte_t pte_mkuptodate(pte_t pte)
1139 -{
1140 - pte_clear_bits(pte, _PAGE_NEWPAGE);
1141 - if(pte_present(pte))
1142 - pte_clear_bits(pte, _PAGE_NEWPROT);
1143 - return(pte);
1144 -}
1145 -
1146 -static inline pte_t pte_mknewpage(pte_t pte)
1147 -{
1148 - pte_set_bits(pte, _PAGE_NEWPAGE);
1149 - return(pte);
1150 -}
1151 -
1152 -static inline pte_t pte_mkspecial(pte_t pte)
1153 -{
1154 - return(pte);
1155 -}
1156 -
1157 -static inline void set_pte(pte_t *pteptr, pte_t pteval)
1158 -{
1159 - pte_copy(*pteptr, pteval);
1160 -
1161 - /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
1162 - * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
1163 - * mapped pages.
1164 - */
1165 -
1166 - *pteptr = pte_mknewpage(*pteptr);
1167 - if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
1168 -}
1169 -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
1170 -
1171 -#define __HAVE_ARCH_PTE_SAME
1172 -static inline int pte_same(pte_t pte_a, pte_t pte_b)
1173 -{
1174 - return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
1175 -}
1176 -
1177 -/*
1178 - * Conversion functions: convert a page and protection to a page entry,
1179 - * and a page entry and page directory to the page they refer to.
1180 - */
1181 -
1182 -#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
1183 -#define __virt_to_page(virt) phys_to_page(__pa(virt))
1184 -#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
1185 -#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
1186 -
1187 -#define mk_pte(page, pgprot) \
1188 - ({ pte_t pte; \
1189 - \
1190 - pte_set_val(pte, page_to_phys(page), (pgprot)); \
1191 - if (pte_present(pte)) \
1192 - pte_mknewprot(pte_mknewpage(pte)); \
1193 - pte;})
1194 -
1195 -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1196 -{
1197 - pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
1198 - return pte;
1199 -}
1200 -
1201 -/*
1202 - * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
1203 - *
1204 - * this macro returns the index of the entry in the pgd page which would
1205 - * control the given virtual address
1206 - */
1207 -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1208 -
1209 -/*
1210 - * pgd_offset() returns a (pgd_t *)
1211 - * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
1212 - */
1213 -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
1214 -
1215 -/*
1216 - * a shortcut which implies the use of the kernel's pgd, instead
1217 - * of a process's
1218 - */
1219 -#define pgd_offset_k(address) pgd_offset(&init_mm, address)
1220 -
1221 -/*
1222 - * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
1223 - *
1224 - * this macro returns the index of the entry in the pmd page which would
1225 - * control the given virtual address
1226 - */
1227 -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
1228 -#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1229 -
1230 -#define pmd_page_vaddr(pmd) \
1231 - ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
1232 -
1233 -/*
1234 - * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
1235 - *
1236 - * this macro returns the index of the entry in the pte page which would
1237 - * control the given virtual address
1238 - */
1239 -#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
1240 -#define pte_offset_kernel(dir, address) \
1241 - ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
1242 -#define pte_offset_map(dir, address) \
1243 - ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
1244 -#define pte_unmap(pte) do { } while (0)
1245 -
1246 -struct mm_struct;
1247 -extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
1248 -
1249 -#define update_mmu_cache(vma,address,ptep) do ; while (0)
1250 -
1251 -/* Encode and de-code a swap entry */
1252 -#define __swp_type(x) (((x).val >> 5) & 0x1f)
1253 -#define __swp_offset(x) ((x).val >> 11)
1254 -
1255 -#define __swp_entry(type, offset) \
1256 - ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
1257 -#define __pte_to_swp_entry(pte) \
1258 - ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
1259 -#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
1260 -
1261 -#define kern_addr_valid(addr) (1)
1262 -
1263 -#include <asm-generic/pgtable.h>
1264 -
1265 -/* Clear a kernel PTE and flush it from the TLB */
1266 -#define kpte_clear_flush(ptep, vaddr) \
1267 -do { \
1268 - pte_clear(&init_mm, (vaddr), (ptep)); \
1269 - __flush_tlb_one((vaddr)); \
1270 -} while (0)
1271 -
1272 -#endif
1273 --- a/arch/um/include/asm/processor-generic.h
1274 +++ /dev/null
1275 @@ -1,115 +0,0 @@
1276 -/*
1277 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1278 - * Licensed under the GPL
1279 - */
1280 -
1281 -#ifndef __UM_PROCESSOR_GENERIC_H
1282 -#define __UM_PROCESSOR_GENERIC_H
1283 -
1284 -struct pt_regs;
1285 -
1286 -struct task_struct;
1287 -
1288 -#include <asm/ptrace.h>
1289 -#include <registers.h>
1290 -#include <sysdep/archsetjmp.h>
1291 -
1292 -#include <linux/prefetch.h>
1293 -
1294 -struct mm_struct;
1295 -
1296 -struct thread_struct {
1297 - struct pt_regs regs;
1298 - struct pt_regs *segv_regs;
1299 - int singlestep_syscall;
1300 - void *fault_addr;
1301 - jmp_buf *fault_catcher;
1302 - struct task_struct *prev_sched;
1303 - struct arch_thread arch;
1304 - jmp_buf switch_buf;
1305 - struct {
1306 - int op;
1307 - union {
1308 - struct {
1309 - int pid;
1310 - } fork, exec;
1311 - struct {
1312 - int (*proc)(void *);
1313 - void *arg;
1314 - } thread;
1315 - struct {
1316 - void (*proc)(void *);
1317 - void *arg;
1318 - } cb;
1319 - } u;
1320 - } request;
1321 -};
1322 -
1323 -#define INIT_THREAD \
1324 -{ \
1325 - .regs = EMPTY_REGS, \
1326 - .fault_addr = NULL, \
1327 - .prev_sched = NULL, \
1328 - .arch = INIT_ARCH_THREAD, \
1329 - .request = { 0 } \
1330 -}
1331 -
1332 -static inline void release_thread(struct task_struct *task)
1333 -{
1334 -}
1335 -
1336 -extern unsigned long thread_saved_pc(struct task_struct *t);
1337 -
1338 -static inline void mm_copy_segments(struct mm_struct *from_mm,
1339 - struct mm_struct *new_mm)
1340 -{
1341 -}
1342 -
1343 -#define init_stack (init_thread_union.stack)
1344 -
1345 -/*
1346 - * User space process size: 3GB (default).
1347 - */
1348 -extern unsigned long task_size;
1349 -
1350 -#define TASK_SIZE (task_size)
1351 -
1352 -#undef STACK_TOP
1353 -#undef STACK_TOP_MAX
1354 -
1355 -extern unsigned long stacksizelim;
1356 -
1357 -#define STACK_ROOM (stacksizelim)
1358 -#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
1359 -#define STACK_TOP_MAX STACK_TOP
1360 -
1361 -/* This decides where the kernel will search for a free chunk of vm
1362 - * space during mmap's.
1363 - */
1364 -#define TASK_UNMAPPED_BASE (0x40000000)
1365 -
1366 -extern void start_thread(struct pt_regs *regs, unsigned long entry,
1367 - unsigned long stack);
1368 -
1369 -struct cpuinfo_um {
1370 - unsigned long loops_per_jiffy;
1371 - int ipi_pipe[2];
1372 -};
1373 -
1374 -extern struct cpuinfo_um boot_cpu_data;
1375 -
1376 -#define my_cpu_data cpu_data[smp_processor_id()]
1377 -
1378 -#ifdef CONFIG_SMP
1379 -extern struct cpuinfo_um cpu_data[];
1380 -#define current_cpu_data cpu_data[smp_processor_id()]
1381 -#else
1382 -#define cpu_data (&boot_cpu_data)
1383 -#define current_cpu_data boot_cpu_data
1384 -#endif
1385 -
1386 -
1387 -#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
1388 -extern unsigned long get_wchan(struct task_struct *p);
1389 -
1390 -#endif
1391 --- a/arch/um/include/asm/ptrace-generic.h
1392 +++ /dev/null
1393 @@ -1,45 +0,0 @@
1394 -/*
1395 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1396 - * Licensed under the GPL
1397 - */
1398 -
1399 -#ifndef __UM_PTRACE_GENERIC_H
1400 -#define __UM_PTRACE_GENERIC_H
1401 -
1402 -#ifndef __ASSEMBLY__
1403 -
1404 -#include <asm/ptrace-abi.h>
1405 -#include <sysdep/ptrace.h>
1406 -
1407 -struct pt_regs {
1408 - struct uml_pt_regs regs;
1409 -};
1410 -
1411 -#define arch_has_single_step() (1)
1412 -
1413 -#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
1414 -
1415 -#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
1416 -#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
1417 -
1418 -#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
1419 -
1420 -#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
1421 -
1422 -#define instruction_pointer(regs) PT_REGS_IP(regs)
1423 -
1424 -struct task_struct;
1425 -
1426 -extern long subarch_ptrace(struct task_struct *child, long request,
1427 - unsigned long addr, unsigned long data);
1428 -extern unsigned long getreg(struct task_struct *child, int regno);
1429 -extern int putreg(struct task_struct *child, int regno, unsigned long value);
1430 -
1431 -extern int arch_copy_tls(struct task_struct *new);
1432 -extern void clear_flushed_tls(struct task_struct *task);
1433 -extern void syscall_trace_enter(struct pt_regs *regs);
1434 -extern void syscall_trace_leave(struct pt_regs *regs);
1435 -
1436 -#endif
1437 -
1438 -#endif
1439 --- a/arch/um/include/asm/setup.h
1440 +++ /dev/null
1441 @@ -1,10 +0,0 @@
1442 -#ifndef SETUP_H_INCLUDED
1443 -#define SETUP_H_INCLUDED
1444 -
1445 -/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
1446 - * command line, so this choice is ok.
1447 - */
1448 -
1449 -#define COMMAND_LINE_SIZE 4096
1450 -
1451 -#endif /* SETUP_H_INCLUDED */
1452 --- a/arch/um/include/asm/smp.h
1453 +++ /dev/null
1454 @@ -1,32 +0,0 @@
1455 -#ifndef __UM_SMP_H
1456 -#define __UM_SMP_H
1457 -
1458 -#ifdef CONFIG_SMP
1459 -
1460 -#include <linux/bitops.h>
1461 -#include <asm/current.h>
1462 -#include <linux/cpumask.h>
1463 -
1464 -#define raw_smp_processor_id() (current_thread->cpu)
1465 -
1466 -#define cpu_logical_map(n) (n)
1467 -#define cpu_number_map(n) (n)
1468 -extern int hard_smp_processor_id(void);
1469 -#define NO_PROC_ID -1
1470 -
1471 -extern int ncpus;
1472 -
1473 -
1474 -static inline void smp_cpus_done(unsigned int maxcpus)
1475 -{
1476 -}
1477 -
1478 -extern struct task_struct *idle_threads[NR_CPUS];
1479 -
1480 -#else
1481 -
1482 -#define hard_smp_processor_id() 0
1483 -
1484 -#endif
1485 -
1486 -#endif
1487 --- a/arch/um/include/asm/stacktrace.h
1488 +++ /dev/null
1489 @@ -1,42 +0,0 @@
1490 -#ifndef _ASM_UML_STACKTRACE_H
1491 -#define _ASM_UML_STACKTRACE_H
1492 -
1493 -#include <linux/uaccess.h>
1494 -#include <linux/ptrace.h>
1495 -
1496 -struct stack_frame {
1497 - struct stack_frame *next_frame;
1498 - unsigned long return_address;
1499 -};
1500 -
1501 -struct stacktrace_ops {
1502 - void (*address)(void *data, unsigned long address, int reliable);
1503 -};
1504 -
1505 -#ifdef CONFIG_FRAME_POINTER
1506 -static inline unsigned long
1507 -get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
1508 -{
1509 - if (!task || task == current)
1510 - return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
1511 - return KSTK_EBP(task);
1512 -}
1513 -#else
1514 -static inline unsigned long
1515 -get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
1516 -{
1517 - return 0;
1518 -}
1519 -#endif
1520 -
1521 -static inline unsigned long
1522 -*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
1523 -{
1524 - if (!task || task == current)
1525 - return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
1526 - return (unsigned long *)KSTK_ESP(task);
1527 -}
1528 -
1529 -void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
1530 -
1531 -#endif /* _ASM_UML_STACKTRACE_H */
1532 --- a/arch/um/include/asm/sysrq.h
1533 +++ /dev/null
1534 @@ -1,7 +0,0 @@
1535 -#ifndef __UM_SYSRQ_H
1536 -#define __UM_SYSRQ_H
1537 -
1538 -struct task_struct;
1539 -extern void show_trace(struct task_struct* task, unsigned long *stack);
1540 -
1541 -#endif
1542 --- a/arch/um/include/asm/thread_info.h
1543 +++ /dev/null
1544 @@ -1,78 +0,0 @@
1545 -/*
1546 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1547 - * Licensed under the GPL
1548 - */
1549 -
1550 -#ifndef __UM_THREAD_INFO_H
1551 -#define __UM_THREAD_INFO_H
1552 -
1553 -#ifndef __ASSEMBLY__
1554 -
1555 -#include <asm/types.h>
1556 -#include <asm/page.h>
1557 -#include <asm/uaccess.h>
1558 -
1559 -struct thread_info {
1560 - struct task_struct *task; /* main task structure */
1561 - struct exec_domain *exec_domain; /* execution domain */
1562 - unsigned long flags; /* low level flags */
1563 - __u32 cpu; /* current CPU */
1564 - int preempt_count; /* 0 => preemptable,
1565 - <0 => BUG */
1566 - mm_segment_t addr_limit; /* thread address space:
1567 - 0-0xBFFFFFFF for user
1568 - 0-0xFFFFFFFF for kernel */
1569 - struct restart_block restart_block;
1570 - struct thread_info *real_thread; /* Points to non-IRQ stack */
1571 -};
1572 -
1573 -#define INIT_THREAD_INFO(tsk) \
1574 -{ \
1575 - .task = &tsk, \
1576 - .exec_domain = &default_exec_domain, \
1577 - .flags = 0, \
1578 - .cpu = 0, \
1579 - .preempt_count = INIT_PREEMPT_COUNT, \
1580 - .addr_limit = KERNEL_DS, \
1581 - .restart_block = { \
1582 - .fn = do_no_restart_syscall, \
1583 - }, \
1584 - .real_thread = NULL, \
1585 -}
1586 -
1587 -#define init_thread_info (init_thread_union.thread_info)
1588 -#define init_stack (init_thread_union.stack)
1589 -
1590 -#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
1591 -/* how to get the thread information struct from C */
1592 -static inline struct thread_info *current_thread_info(void)
1593 -{
1594 - struct thread_info *ti;
1595 - unsigned long mask = THREAD_SIZE - 1;
1596 - void *p;
1597 -
1598 - asm volatile ("" : "=r" (p) : "0" (&ti));
1599 - ti = (struct thread_info *) (((unsigned long)p) & ~mask);
1600 - return ti;
1601 -}
1602 -
1603 -#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
1604 -
1605 -#endif
1606 -
1607 -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
1608 -#define TIF_SIGPENDING 1 /* signal pending */
1609 -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
1610 -#define TIF_RESTART_BLOCK 4
1611 -#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
1612 -#define TIF_SYSCALL_AUDIT 6
1613 -#define TIF_RESTORE_SIGMASK 7
1614 -#define TIF_NOTIFY_RESUME 8
1615 -
1616 -#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
1617 -#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
1618 -#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
1619 -#define _TIF_MEMDIE (1 << TIF_MEMDIE)
1620 -#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
1621 -
1622 -#endif
1623 --- a/arch/um/include/asm/timex.h
1624 +++ /dev/null
1625 @@ -1,13 +0,0 @@
1626 -#ifndef __UM_TIMEX_H
1627 -#define __UM_TIMEX_H
1628 -
1629 -typedef unsigned long cycles_t;
1630 -
1631 -static inline cycles_t get_cycles (void)
1632 -{
1633 - return 0;
1634 -}
1635 -
1636 -#define CLOCK_TICK_RATE (HZ)
1637 -
1638 -#endif
1639 --- a/arch/um/include/asm/tlb.h
1640 +++ /dev/null
1641 @@ -1,134 +0,0 @@
1642 -#ifndef __UM_TLB_H
1643 -#define __UM_TLB_H
1644 -
1645 -#include <linux/pagemap.h>
1646 -#include <linux/swap.h>
1647 -#include <asm/percpu.h>
1648 -#include <asm/pgalloc.h>
1649 -#include <asm/tlbflush.h>
1650 -
1651 -#define tlb_start_vma(tlb, vma) do { } while (0)
1652 -#define tlb_end_vma(tlb, vma) do { } while (0)
1653 -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
1654 -
1655 -/* struct mmu_gather is an opaque type used by the mm code for passing around
1656 - * any data needed by arch specific code for tlb_remove_page.
1657 - */
1658 -struct mmu_gather {
1659 - struct mm_struct *mm;
1660 - unsigned int need_flush; /* Really unmapped some ptes? */
1661 - unsigned long start;
1662 - unsigned long end;
1663 - unsigned int fullmm; /* non-zero means full mm flush */
1664 -};
1665 -
1666 -static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
1667 - unsigned long address)
1668 -{
1669 - if (tlb->start > address)
1670 - tlb->start = address;
1671 - if (tlb->end < address + PAGE_SIZE)
1672 - tlb->end = address + PAGE_SIZE;
1673 -}
1674 -
1675 -static inline void init_tlb_gather(struct mmu_gather *tlb)
1676 -{
1677 - tlb->need_flush = 0;
1678 -
1679 - tlb->start = TASK_SIZE;
1680 - tlb->end = 0;
1681 -
1682 - if (tlb->fullmm) {
1683 - tlb->start = 0;
1684 - tlb->end = TASK_SIZE;
1685 - }
1686 -}
1687 -
1688 -static inline void
1689 -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
1690 -{
1691 - tlb->mm = mm;
1692 - tlb->start = start;
1693 - tlb->end = end;
1694 - tlb->fullmm = !(start | (end+1));
1695 -
1696 - init_tlb_gather(tlb);
1697 -}
1698 -
1699 -extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
1700 - unsigned long end);
1701 -
1702 -static inline void
1703 -tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
1704 -{
1705 - flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
1706 -}
1707 -
1708 -static inline void
1709 -tlb_flush_mmu_free(struct mmu_gather *tlb)
1710 -{
1711 - init_tlb_gather(tlb);
1712 -}
1713 -
1714 -static inline void
1715 -tlb_flush_mmu(struct mmu_gather *tlb)
1716 -{
1717 - if (!tlb->need_flush)
1718 - return;
1719 -
1720 - tlb_flush_mmu_tlbonly(tlb);
1721 - tlb_flush_mmu_free(tlb);
1722 -}
1723 -
1724 -/* tlb_finish_mmu
1725 - * Called at the end of the shootdown operation to free up any resources
1726 - * that were required.
1727 - */
1728 -static inline void
1729 -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
1730 -{
1731 - tlb_flush_mmu(tlb);
1732 -
1733 - /* keep the page table cache within bounds */
1734 - check_pgt_cache();
1735 -}
1736 -
1737 -/* tlb_remove_page
1738 - * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
1739 - * while handling the additional races in SMP caused by other CPUs
1740 - * caching valid mappings in their TLBs.
1741 - */
1742 -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
1743 -{
1744 - tlb->need_flush = 1;
1745 - free_page_and_swap_cache(page);
1746 - return 1; /* avoid calling tlb_flush_mmu */
1747 -}
1748 -
1749 -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
1750 -{
1751 - __tlb_remove_page(tlb, page);
1752 -}
1753 -
1754 -/**
1755 - * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
1756 - *
1757 - * Record the fact that pte's were really umapped in ->need_flush, so we can
1758 - * later optimise away the tlb invalidate. This helps when userspace is
1759 - * unmapping already-unmapped pages, which happens quite a lot.
1760 - */
1761 -#define tlb_remove_tlb_entry(tlb, ptep, address) \
1762 - do { \
1763 - tlb->need_flush = 1; \
1764 - __tlb_remove_tlb_entry(tlb, ptep, address); \
1765 - } while (0)
1766 -
1767 -#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
1768 -
1769 -#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
1770 -
1771 -#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
1772 -
1773 -#define tlb_migrate_finish(mm) do {} while (0)
1774 -
1775 -#endif
1776 --- a/arch/um/include/asm/tlbflush.h
1777 +++ /dev/null
1778 @@ -1,31 +0,0 @@
1779 -/*
1780 - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
1781 - * Licensed under the GPL
1782 - */
1783 -
1784 -#ifndef __UM_TLBFLUSH_H
1785 -#define __UM_TLBFLUSH_H
1786 -
1787 -#include <linux/mm.h>
1788 -
1789 -/*
1790 - * TLB flushing:
1791 - *
1792 - * - flush_tlb() flushes the current mm struct TLBs
1793 - * - flush_tlb_all() flushes all processes TLBs
1794 - * - flush_tlb_mm(mm) flushes the specified mm context TLB's
1795 - * - flush_tlb_page(vma, vmaddr) flushes one page
1796 - * - flush_tlb_kernel_vm() flushes the kernel vm area
1797 - * - flush_tlb_range(vma, start, end) flushes a range of pages
1798 - */
1799 -
1800 -extern void flush_tlb_all(void);
1801 -extern void flush_tlb_mm(struct mm_struct *mm);
1802 -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
1803 - unsigned long end);
1804 -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
1805 -extern void flush_tlb_kernel_vm(void);
1806 -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
1807 -extern void __flush_tlb_one(unsigned long addr);
1808 -
1809 -#endif
1810 --- a/arch/um/include/asm/uaccess.h
1811 +++ /dev/null
1812 @@ -1,178 +0,0 @@
1813 -/*
1814 - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
1815 - * Licensed under the GPL
1816 - */
1817 -
1818 -#ifndef __UM_UACCESS_H
1819 -#define __UM_UACCESS_H
1820 -
1821 -/* thread_info has a mm_segment_t in it, so put the definition up here */
1822 -typedef struct {
1823 - unsigned long seg;
1824 -} mm_segment_t;
1825 -
1826 -#include <linux/thread_info.h>
1827 -#include <linux/errno.h>
1828 -#include <asm/processor.h>
1829 -#include <asm/elf.h>
1830 -
1831 -#define VERIFY_READ 0
1832 -#define VERIFY_WRITE 1
1833 -
1834 -/*
1835 - * The fs value determines whether argument validity checking should be
1836 - * performed or not. If get_fs() == USER_DS, checking is performed, with
1837 - * get_fs() == KERNEL_DS, checking is bypassed.
1838 - *
1839 - * For historical reasons, these macros are grossly misnamed.
1840 - */
1841 -
1842 -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
1843 -
1844 -#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
1845 -#define USER_DS MAKE_MM_SEG(TASK_SIZE)
1846 -
1847 -#define get_ds() (KERNEL_DS)
1848 -#define get_fs() (current_thread_info()->addr_limit)
1849 -#define set_fs(x) (current_thread_info()->addr_limit = (x))
1850 -
1851 -#define segment_eq(a, b) ((a).seg == (b).seg)
1852 -
1853 -#define __under_task_size(addr, size) \
1854 - (((unsigned long) (addr) < TASK_SIZE) && \
1855 - (((unsigned long) (addr) + (size)) < TASK_SIZE))
1856 -
1857 -#define __access_ok_vsyscall(type, addr, size) \
1858 - ((type == VERIFY_READ) && \
1859 - ((unsigned long) (addr) >= FIXADDR_USER_START) && \
1860 - ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
1861 - ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
1862 -
1863 -#define __addr_range_nowrap(addr, size) \
1864 - ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
1865 -
1866 -#define access_ok(type, addr, size) \
1867 - (__addr_range_nowrap(addr, size) && \
1868 - (__under_task_size(addr, size) || \
1869 - __access_ok_vsyscall(type, addr, size) || \
1870 - segment_eq(get_fs(), KERNEL_DS)))
1871 -
1872 -extern int copy_from_user(void *to, const void __user *from, int n);
1873 -extern int copy_to_user(void __user *to, const void *from, int n);
1874 -
1875 -/*
1876 - * strncpy_from_user: - Copy a NUL terminated string from userspace.
1877 - * @dst: Destination address, in kernel space. This buffer must be at
1878 - * least @count bytes long.
1879 - * @src: Source address, in user space.
1880 - * @count: Maximum number of bytes to copy, including the trailing NUL.
1881 - *
1882 - * Copies a NUL-terminated string from userspace to kernel space.
1883 - *
1884 - * On success, returns the length of the string (not including the trailing
1885 - * NUL).
1886 - *
1887 - * If access to userspace fails, returns -EFAULT (some data may have been
1888 - * copied).
1889 - *
1890 - * If @count is smaller than the length of the string, copies @count bytes
1891 - * and returns @count.
1892 - */
1893 -
1894 -extern int strncpy_from_user(char *dst, const char __user *src, int count);
1895 -
1896 -/*
1897 - * __clear_user: - Zero a block of memory in user space, with less checking.
1898 - * @to: Destination address, in user space.
1899 - * @n: Number of bytes to zero.
1900 - *
1901 - * Zero a block of memory in user space. Caller must check
1902 - * the specified block with access_ok() before calling this function.
1903 - *
1904 - * Returns number of bytes that could not be cleared.
1905 - * On success, this will be zero.
1906 - */
1907 -extern int __clear_user(void __user *mem, int len);
1908 -
1909 -/*
1910 - * clear_user: - Zero a block of memory in user space.
1911 - * @to: Destination address, in user space.
1912 - * @n: Number of bytes to zero.
1913 - *
1914 - * Zero a block of memory in user space.
1915 - *
1916 - * Returns number of bytes that could not be cleared.
1917 - * On success, this will be zero.
1918 - */
1919 -extern int clear_user(void __user *mem, int len);
1920 -
1921 -/*
1922 - * strlen_user: - Get the size of a string in user space.
1923 - * @str: The string to measure.
1924 - * @n: The maximum valid length
1925 - *
1926 - * Get the size of a NUL-terminated string in user space.
1927 - *
1928 - * Returns the size of the string INCLUDING the terminating NUL.
1929 - * On exception, returns 0.
1930 - * If the string is too long, returns a value greater than @n.
1931 - */
1932 -extern int strnlen_user(const void __user *str, int len);
1933 -
1934 -#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
1935 -
1936 -#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
1937 -
1938 -#define __copy_to_user_inatomic __copy_to_user
1939 -#define __copy_from_user_inatomic __copy_from_user
1940 -
1941 -#define __get_user(x, ptr) \
1942 -({ \
1943 - const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
1944 - __typeof__(x) __private_val; \
1945 - int __private_ret = -EFAULT; \
1946 - (x) = (__typeof__(*(__private_ptr)))0; \
1947 - if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
1948 - sizeof(*(__private_ptr))) == 0) { \
1949 - (x) = (__typeof__(*(__private_ptr))) __private_val; \
1950 - __private_ret = 0; \
1951 - } \
1952 - __private_ret; \
1953 -})
1954 -
1955 -#define get_user(x, ptr) \
1956 -({ \
1957 - const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
1958 - (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
1959 - __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
1960 -})
1961 -
1962 -#define __put_user(x, ptr) \
1963 -({ \
1964 - __typeof__(*(ptr)) __user *__private_ptr = ptr; \
1965 - __typeof__(*(__private_ptr)) __private_val; \
1966 - int __private_ret = -EFAULT; \
1967 - __private_val = (__typeof__(*(__private_ptr))) (x); \
1968 - if (__copy_to_user((__private_ptr), &__private_val, \
1969 - sizeof(*(__private_ptr))) == 0) { \
1970 - __private_ret = 0; \
1971 - } \
1972 - __private_ret; \
1973 -})
1974 -
1975 -#define put_user(x, ptr) \
1976 -({ \
1977 - __typeof__(*(ptr)) __user *private_ptr = (ptr); \
1978 - (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
1979 - __put_user(x, private_ptr) : -EFAULT); \
1980 -})
1981 -
1982 -#define strlen_user(str) strnlen_user(str, ~0U >> 1)
1983 -
1984 -struct exception_table_entry
1985 -{
1986 - unsigned long insn;
1987 - unsigned long fixup;
1988 -};
1989 -
1990 -#endif
1991 --- /dev/null
1992 +++ b/arch/um/include/uapi/asm/Kbuild
1993 @@ -0,0 +1,30 @@
1994 +generic-y += barrier.h
1995 +generic-y += bug.h
1996 +generic-y += clkdev.h
1997 +generic-y += cputime.h
1998 +generic-y += current.h
1999 +generic-y += delay.h
2000 +generic-y += device.h
2001 +generic-y += emergency-restart.h
2002 +generic-y += exec.h
2003 +generic-y += ftrace.h
2004 +generic-y += futex.h
2005 +generic-y += hardirq.h
2006 +generic-y += hash.h
2007 +generic-y += hw_irq.h
2008 +generic-y += io.h
2009 +generic-y += irq_regs.h
2010 +generic-y += irq_work.h
2011 +generic-y += kdebug.h
2012 +generic-y += mcs_spinlock.h
2013 +generic-y += mutex.h
2014 +generic-y += param.h
2015 +generic-y += pci.h
2016 +generic-y += percpu.h
2017 +generic-y += preempt.h
2018 +generic-y += scatterlist.h
2019 +generic-y += sections.h
2020 +generic-y += switch_to.h
2021 +generic-y += topology.h
2022 +generic-y += trace_clock.h
2023 +generic-y += xor.h
2024 --- /dev/null
2025 +++ b/arch/um/include/uapi/asm/a.out-core.h
2026 @@ -0,0 +1,27 @@
2027 +/* a.out coredump register dumper
2028 + *
2029 + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
2030 + * Written by David Howells (dhowells@redhat.com)
2031 + *
2032 + * This program is free software; you can redistribute it and/or
2033 + * modify it under the terms of the GNU General Public Licence
2034 + * as published by the Free Software Foundation; either version
2035 + * 2 of the Licence, or (at your option) any later version.
2036 + */
2037 +
2038 +#ifndef __UM_A_OUT_CORE_H
2039 +#define __UM_A_OUT_CORE_H
2040 +
2041 +#ifdef __KERNEL__
2042 +
2043 +#include <linux/user.h>
2044 +
2045 +/*
2046 + * fill in the user structure for an a.out core dump
2047 + */
2048 +static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
2049 +{
2050 +}
2051 +
2052 +#endif /* __KERNEL__ */
2053 +#endif /* __UM_A_OUT_CORE_H */
2054 --- /dev/null
2055 +++ b/arch/um/include/uapi/asm/bugs.h
2056 @@ -0,0 +1,6 @@
2057 +#ifndef __UM_BUGS_H
2058 +#define __UM_BUGS_H
2059 +
2060 +void check_bugs(void);
2061 +
2062 +#endif
2063 --- /dev/null
2064 +++ b/arch/um/include/uapi/asm/cache.h
2065 @@ -0,0 +1,17 @@
2066 +#ifndef __UM_CACHE_H
2067 +#define __UM_CACHE_H
2068 +
2069 +
2070 +#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
2071 +# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
2072 +#elif defined(CONFIG_UML_X86) /* 64-bit */
2073 +# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
2074 +#else
2075 +/* XXX: this was taken from x86, now it's completely random. Luckily only
2076 + * affects SMP padding. */
2077 +# define L1_CACHE_SHIFT 5
2078 +#endif
2079 +
2080 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2081 +
2082 +#endif
2083 --- /dev/null
2084 +++ b/arch/um/include/uapi/asm/common.lds.S
2085 @@ -0,0 +1,107 @@
2086 +#include <asm-generic/vmlinux.lds.h>
2087 +
2088 + .fini : { *(.fini) } =0x9090
2089 + _etext = .;
2090 + PROVIDE (etext = .);
2091 +
2092 + . = ALIGN(4096);
2093 + _sdata = .;
2094 + PROVIDE (sdata = .);
2095 +
2096 + RODATA
2097 +
2098 + .unprotected : { *(.unprotected) }
2099 + . = ALIGN(4096);
2100 + PROVIDE (_unprotected_end = .);
2101 +
2102 + . = ALIGN(4096);
2103 + .note : { *(.note.*) }
2104 + EXCEPTION_TABLE(0)
2105 +
2106 + BUG_TABLE
2107 +
2108 + .uml.setup.init : {
2109 + __uml_setup_start = .;
2110 + *(.uml.setup.init)
2111 + __uml_setup_end = .;
2112 + }
2113 +
2114 + .uml.help.init : {
2115 + __uml_help_start = .;
2116 + *(.uml.help.init)
2117 + __uml_help_end = .;
2118 + }
2119 +
2120 + .uml.postsetup.init : {
2121 + __uml_postsetup_start = .;
2122 + *(.uml.postsetup.init)
2123 + __uml_postsetup_end = .;
2124 + }
2125 +
2126 + .init.setup : {
2127 + INIT_SETUP(0)
2128 + }
2129 +
2130 + PERCPU_SECTION(32)
2131 +
2132 + .initcall.init : {
2133 + INIT_CALLS
2134 + }
2135 +
2136 + .con_initcall.init : {
2137 + CON_INITCALL
2138 + }
2139 +
2140 + .uml.initcall.init : {
2141 + __uml_initcall_start = .;
2142 + *(.uml.initcall.init)
2143 + __uml_initcall_end = .;
2144 + }
2145 +
2146 + SECURITY_INIT
2147 +
2148 + .exitcall : {
2149 + __exitcall_begin = .;
2150 + *(.exitcall.exit)
2151 + __exitcall_end = .;
2152 + }
2153 +
2154 + .uml.exitcall : {
2155 + __uml_exitcall_begin = .;
2156 + *(.uml.exitcall.exit)
2157 + __uml_exitcall_end = .;
2158 + }
2159 +
2160 + . = ALIGN(4);
2161 + .altinstructions : {
2162 + __alt_instructions = .;
2163 + *(.altinstructions)
2164 + __alt_instructions_end = .;
2165 + }
2166 + .altinstr_replacement : { *(.altinstr_replacement) }
2167 + /* .exit.text is discarded at runtime, not link time, to deal with references
2168 + from .altinstructions and .eh_frame */
2169 + .exit.text : { *(.exit.text) }
2170 + .exit.data : { *(.exit.data) }
2171 +
2172 + .preinit_array : {
2173 + __preinit_array_start = .;
2174 + *(.preinit_array)
2175 + __preinit_array_end = .;
2176 + }
2177 + .init_array : {
2178 + __init_array_start = .;
2179 + *(.init_array)
2180 + __init_array_end = .;
2181 + }
2182 + .fini_array : {
2183 + __fini_array_start = .;
2184 + *(.fini_array)
2185 + __fini_array_end = .;
2186 + }
2187 +
2188 + . = ALIGN(4096);
2189 + .init.ramfs : {
2190 + INIT_RAM_FS
2191 + }
2192 +
2193 --- /dev/null
2194 +++ b/arch/um/include/uapi/asm/dma.h
2195 @@ -0,0 +1,10 @@
2196 +#ifndef __UM_DMA_H
2197 +#define __UM_DMA_H
2198 +
2199 +#include <asm/io.h>
2200 +
2201 +extern unsigned long uml_physmem;
2202 +
2203 +#define MAX_DMA_ADDRESS (uml_physmem)
2204 +
2205 +#endif
2206 --- /dev/null
2207 +++ b/arch/um/include/uapi/asm/fixmap.h
2208 @@ -0,0 +1,60 @@
2209 +#ifndef __UM_FIXMAP_H
2210 +#define __UM_FIXMAP_H
2211 +
2212 +#include <asm/processor.h>
2213 +#include <asm/kmap_types.h>
2214 +#include <asm/archparam.h>
2215 +#include <asm/page.h>
2216 +#include <linux/threads.h>
2217 +
2218 +/*
2219 + * Here we define all the compile-time 'special' virtual
2220 + * addresses. The point is to have a constant address at
2221 + * compile time, but to set the physical address only
2222 + * in the boot process. We allocate these special addresses
2223 + * from the end of virtual memory (0xfffff000) backwards.
2224 + * Also this lets us do fail-safe vmalloc(), we
2225 + * can guarantee that these special addresses and
2226 + * vmalloc()-ed addresses never overlap.
2227 + *
2228 + * these 'compile-time allocated' memory buffers are
2229 + * fixed-size 4k pages. (or larger if used with an increment
2230 + * highger than 1) use fixmap_set(idx,phys) to associate
2231 + * physical memory with fixmap indices.
2232 + *
2233 + * TLB entries of such buffers will not be flushed across
2234 + * task switches.
2235 + */
2236 +
2237 +/*
2238 + * on UP currently we will have no trace of the fixmap mechanism,
2239 + * no page table allocations, etc. This might change in the
2240 + * future, say framebuffers for the console driver(s) could be
2241 + * fix-mapped?
2242 + */
2243 +enum fixed_addresses {
2244 +#ifdef CONFIG_HIGHMEM
2245 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
2246 + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
2247 +#endif
2248 + __end_of_fixed_addresses
2249 +};
2250 +
2251 +extern void __set_fixmap (enum fixed_addresses idx,
2252 + unsigned long phys, pgprot_t flags);
2253 +
2254 +/*
2255 + * used by vmalloc.c.
2256 + *
2257 + * Leave one empty page between vmalloc'ed areas and
2258 + * the start of the fixmap, and leave one page empty
2259 + * at the top of mem..
2260 + */
2261 +
2262 +#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
2263 +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
2264 +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
2265 +
2266 +#include <asm-generic/fixmap.h>
2267 +
2268 +#endif
2269 --- /dev/null
2270 +++ b/arch/um/include/uapi/asm/irq.h
2271 @@ -0,0 +1,23 @@
2272 +#ifndef __UM_IRQ_H
2273 +#define __UM_IRQ_H
2274 +
2275 +#define TIMER_IRQ 0
2276 +#define UMN_IRQ 1
2277 +#define CONSOLE_IRQ 2
2278 +#define CONSOLE_WRITE_IRQ 3
2279 +#define UBD_IRQ 4
2280 +#define UM_ETH_IRQ 5
2281 +#define SSL_IRQ 6
2282 +#define SSL_WRITE_IRQ 7
2283 +#define ACCEPT_IRQ 8
2284 +#define MCONSOLE_IRQ 9
2285 +#define WINCH_IRQ 10
2286 +#define SIGIO_WRITE_IRQ 11
2287 +#define TELNETD_IRQ 12
2288 +#define XTERM_IRQ 13
2289 +#define RANDOM_IRQ 14
2290 +
2291 +#define LAST_IRQ RANDOM_IRQ
2292 +#define NR_IRQS (LAST_IRQ + 1)
2293 +
2294 +#endif
2295 --- /dev/null
2296 +++ b/arch/um/include/uapi/asm/irqflags.h
2297 @@ -0,0 +1,42 @@
2298 +#ifndef __UM_IRQFLAGS_H
2299 +#define __UM_IRQFLAGS_H
2300 +
2301 +extern int get_signals(void);
2302 +extern int set_signals(int enable);
2303 +extern void block_signals(void);
2304 +extern void unblock_signals(void);
2305 +
2306 +static inline unsigned long arch_local_save_flags(void)
2307 +{
2308 + return get_signals();
2309 +}
2310 +
2311 +static inline void arch_local_irq_restore(unsigned long flags)
2312 +{
2313 + set_signals(flags);
2314 +}
2315 +
2316 +static inline void arch_local_irq_enable(void)
2317 +{
2318 + unblock_signals();
2319 +}
2320 +
2321 +static inline void arch_local_irq_disable(void)
2322 +{
2323 + block_signals();
2324 +}
2325 +
2326 +static inline unsigned long arch_local_irq_save(void)
2327 +{
2328 + unsigned long flags;
2329 + flags = arch_local_save_flags();
2330 + arch_local_irq_disable();
2331 + return flags;
2332 +}
2333 +
2334 +static inline bool arch_irqs_disabled(void)
2335 +{
2336 + return arch_local_save_flags() == 0;
2337 +}
2338 +
2339 +#endif
2340 --- /dev/null
2341 +++ b/arch/um/include/uapi/asm/kmap_types.h
2342 @@ -0,0 +1,13 @@
2343 +/*
2344 + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
2345 + * Licensed under the GPL
2346 + */
2347 +
2348 +#ifndef __UM_KMAP_TYPES_H
2349 +#define __UM_KMAP_TYPES_H
2350 +
2351 +/* No more #include "asm/arch/kmap_types.h" ! */
2352 +
2353 +#define KM_TYPE_NR 14
2354 +
2355 +#endif
2356 --- /dev/null
2357 +++ b/arch/um/include/uapi/asm/kvm_para.h
2358 @@ -0,0 +1 @@
2359 +#include <asm-generic/kvm_para.h>
2360 --- /dev/null
2361 +++ b/arch/um/include/uapi/asm/mmu.h
2362 @@ -0,0 +1,24 @@
2363 +/*
2364 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2365 + * Licensed under the GPL
2366 + */
2367 +
2368 +#ifndef __ARCH_UM_MMU_H
2369 +#define __ARCH_UM_MMU_H
2370 +
2371 +#include <mm_id.h>
2372 +#include <asm/mm_context.h>
2373 +
2374 +typedef struct mm_context {
2375 + struct mm_id id;
2376 + struct uml_arch_mm_context arch;
2377 + struct page *stub_pages[2];
2378 +} mm_context_t;
2379 +
2380 +extern void __switch_mm(struct mm_id * mm_idp);
2381 +
2382 +/* Avoid tangled inclusion with asm/ldt.h */
2383 +extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
2384 +extern void free_ldt(struct mm_context *mm);
2385 +
2386 +#endif
2387 --- /dev/null
2388 +++ b/arch/um/include/uapi/asm/mmu_context.h
2389 @@ -0,0 +1,58 @@
2390 +/*
2391 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2392 + * Licensed under the GPL
2393 + */
2394 +
2395 +#ifndef __UM_MMU_CONTEXT_H
2396 +#define __UM_MMU_CONTEXT_H
2397 +
2398 +#include <linux/sched.h>
2399 +#include <asm/mmu.h>
2400 +
2401 +extern void uml_setup_stubs(struct mm_struct *mm);
2402 +extern void arch_exit_mmap(struct mm_struct *mm);
2403 +
2404 +#define deactivate_mm(tsk,mm) do { } while (0)
2405 +
2406 +extern void force_flush_all(void);
2407 +
2408 +static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
2409 +{
2410 + /*
2411 + * This is called by fs/exec.c and sys_unshare()
2412 + * when the new ->mm is used for the first time.
2413 + */
2414 + __switch_mm(&new->context.id);
2415 + down_write(&new->mmap_sem);
2416 + uml_setup_stubs(new);
2417 + up_write(&new->mmap_sem);
2418 +}
2419 +
2420 +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
2421 + struct task_struct *tsk)
2422 +{
2423 + unsigned cpu = smp_processor_id();
2424 +
2425 + if(prev != next){
2426 + cpumask_clear_cpu(cpu, mm_cpumask(prev));
2427 + cpumask_set_cpu(cpu, mm_cpumask(next));
2428 + if(next != &init_mm)
2429 + __switch_mm(&next->context.id);
2430 + }
2431 +}
2432 +
2433 +static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
2434 +{
2435 + uml_setup_stubs(mm);
2436 +}
2437 +
2438 +static inline void enter_lazy_tlb(struct mm_struct *mm,
2439 + struct task_struct *tsk)
2440 +{
2441 +}
2442 +
2443 +extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
2444 +
2445 +extern void destroy_context(struct mm_struct *mm);
2446 +
2447 +#endif
2448 --- /dev/null
2449 +++ b/arch/um/include/uapi/asm/page.h
2450 @@ -0,0 +1,127 @@
2451 +/*
2452 + * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
2453 + * Copyright 2003 PathScale, Inc.
2454 + * Licensed under the GPL
2455 + */
2456 +
2457 +#ifndef __UM_PAGE_H
2458 +#define __UM_PAGE_H
2459 +
2460 +#include <linux/const.h>
2461 +
2462 +/* PAGE_SHIFT determines the page size */
2463 +#define PAGE_SHIFT 12
2464 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
2465 +#define PAGE_MASK (~(PAGE_SIZE-1))
2466 +
2467 +#ifndef __ASSEMBLY__
2468 +
2469 +struct page;
2470 +
2471 +#include <linux/types.h>
2472 +#include <asm/vm-flags.h>
2473 +
2474 +/*
2475 + * These are used to make use of C type-checking..
2476 + */
2477 +
2478 +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
2479 +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
2480 +
2481 +#define clear_user_page(page, vaddr, pg) clear_page(page)
2482 +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
2483 +
2484 +#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
2485 +
2486 +typedef struct { unsigned long pte_low, pte_high; } pte_t;
2487 +typedef struct { unsigned long pmd; } pmd_t;
2488 +typedef struct { unsigned long pgd; } pgd_t;
2489 +#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
2490 +
2491 +#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
2492 +#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
2493 +#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
2494 +#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
2495 + smp_wmb(); \
2496 + (to).pte_low = (from).pte_low; })
2497 +#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
2498 +#define pte_set_val(pte, phys, prot) \
2499 + ({ (pte).pte_high = (phys) >> 32; \
2500 + (pte).pte_low = (phys) | pgprot_val(prot); })
2501 +
2502 +#define pmd_val(x) ((x).pmd)
2503 +#define __pmd(x) ((pmd_t) { (x) } )
2504 +
2505 +typedef unsigned long long pfn_t;
2506 +typedef unsigned long long phys_t;
2507 +
2508 +#else
2509 +
2510 +typedef struct { unsigned long pte; } pte_t;
2511 +typedef struct { unsigned long pgd; } pgd_t;
2512 +
2513 +#ifdef CONFIG_3_LEVEL_PGTABLES
2514 +typedef struct { unsigned long pmd; } pmd_t;
2515 +#define pmd_val(x) ((x).pmd)
2516 +#define __pmd(x) ((pmd_t) { (x) } )
2517 +#endif
2518 +
2519 +#define pte_val(x) ((x).pte)
2520 +
2521 +
2522 +#define pte_get_bits(p, bits) ((p).pte & (bits))
2523 +#define pte_set_bits(p, bits) ((p).pte |= (bits))
2524 +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
2525 +#define pte_copy(to, from) ((to).pte = (from).pte)
2526 +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
2527 +#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
2528 +
2529 +typedef unsigned long pfn_t;
2530 +typedef unsigned long phys_t;
2531 +
2532 +#endif
2533 +
2534 +typedef struct { unsigned long pgprot; } pgprot_t;
2535 +
2536 +typedef struct page *pgtable_t;
2537 +
2538 +#define pgd_val(x) ((x).pgd)
2539 +#define pgprot_val(x) ((x).pgprot)
2540 +
2541 +#define __pte(x) ((pte_t) { (x) } )
2542 +#define __pgd(x) ((pgd_t) { (x) } )
2543 +#define __pgprot(x) ((pgprot_t) { (x) } )
2544 +
2545 +extern unsigned long uml_physmem;
2546 +
2547 +#define PAGE_OFFSET (uml_physmem)
2548 +#define KERNELBASE PAGE_OFFSET
2549 +
2550 +#define __va_space (8*1024*1024)
2551 +
2552 +#include <mem.h>
2553 +
2554 +/* Cast to unsigned long before casting to void * to avoid a warning from
2555 + * mmap_kmem about cutting a long long down to a void *. Not sure that
2556 + * casting is the right thing, but 32-bit UML can't have 64-bit virtual
2557 + * addresses
2558 + */
2559 +#define __pa(virt) to_phys((void *) (unsigned long) (virt))
2560 +#define __va(phys) to_virt((unsigned long) (phys))
2561 +
2562 +#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
2563 +#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
2564 +
2565 +#define pfn_valid(pfn) ((pfn) < max_mapnr)
2566 +#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
2567 +
2568 +#include <asm-generic/memory_model.h>
2569 +#include <asm-generic/getorder.h>
2570 +
2571 +#endif /* __ASSEMBLY__ */
2572 +
2573 +#ifdef CONFIG_X86_32
2574 +#define __HAVE_ARCH_GATE_AREA 1
2575 +#endif
2576 +
2577 +#endif /* __UM_PAGE_H */
2578 --- /dev/null
2579 +++ b/arch/um/include/uapi/asm/pgalloc.h
2580 @@ -0,0 +1,61 @@
2581 +/*
2582 + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
2583 + * Copyright 2003 PathScale, Inc.
2584 + * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
2585 + * Licensed under the GPL
2586 + */
2587 +
2588 +#ifndef __UM_PGALLOC_H
2589 +#define __UM_PGALLOC_H
2590 +
2591 +#include <linux/mm.h>
2592 +
2593 +#define pmd_populate_kernel(mm, pmd, pte) \
2594 + set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
2595 +
2596 +#define pmd_populate(mm, pmd, pte) \
2597 + set_pmd(pmd, __pmd(_PAGE_TABLE + \
2598 + ((unsigned long long)page_to_pfn(pte) << \
2599 + (unsigned long long) PAGE_SHIFT)))
2600 +#define pmd_pgtable(pmd) pmd_page(pmd)
2601 +
2602 +/*
2603 + * Allocate and free page tables.
2604 + */
2605 +extern pgd_t *pgd_alloc(struct mm_struct *);
2606 +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
2607 +
2608 +extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
2609 +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
2610 +
2611 +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2612 +{
2613 + free_page((unsigned long) pte);
2614 +}
2615 +
2616 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2617 +{
2618 + pgtable_page_dtor(pte);
2619 + __free_page(pte);
2620 +}
2621 +
2622 +#define __pte_free_tlb(tlb,pte, address) \
2623 +do { \
2624 + pgtable_page_dtor(pte); \
2625 + tlb_remove_page((tlb),(pte)); \
2626 +} while (0)
2627 +
2628 +#ifdef CONFIG_3_LEVEL_PGTABLES
2629 +
2630 +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
2631 +{
2632 + free_page((unsigned long)pmd);
2633 +}
2634 +
2635 +#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
2636 +#endif
2637 +
2638 +#define check_pgt_cache() do { } while (0)
2639 +
2640 +#endif
2641 +
2642 --- /dev/null
2643 +++ b/arch/um/include/uapi/asm/pgtable-2level.h
2644 @@ -0,0 +1,53 @@
2645 +/*
2646 + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
2647 + * Copyright 2003 PathScale, Inc.
2648 + * Derived from include/asm-i386/pgtable.h
2649 + * Licensed under the GPL
2650 + */
2651 +
2652 +#ifndef __UM_PGTABLE_2LEVEL_H
2653 +#define __UM_PGTABLE_2LEVEL_H
2654 +
2655 +#include <asm-generic/pgtable-nopmd.h>
2656 +
2657 +/* PGDIR_SHIFT determines what a third-level page table entry can map */
2658 +
2659 +#define PGDIR_SHIFT 22
2660 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
2661 +#define PGDIR_MASK (~(PGDIR_SIZE-1))
2662 +
2663 +/*
2664 + * entries per page directory level: the i386 is two-level, so
2665 + * we don't really have any PMD directory physically.
2666 + */
2667 +#define PTRS_PER_PTE 1024
2668 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
2669 +#define PTRS_PER_PGD 1024
2670 +#define FIRST_USER_ADDRESS 0
2671 +
2672 +#define pte_ERROR(e) \
2673 + printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
2674 + pte_val(e))
2675 +#define pgd_ERROR(e) \
2676 + printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
2677 + pgd_val(e))
2678 +
2679 +static inline int pgd_newpage(pgd_t pgd) { return 0; }
2680 +static inline void pgd_mkuptodate(pgd_t pgd) { }
2681 +
2682 +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
2683 +
2684 +#define pte_pfn(x) phys_to_pfn(pte_val(x))
2685 +#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
2686 +#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
2687 +
2688 +/*
2689 + * Bits 0 through 4 are taken
2690 + */
2691 +#define PTE_FILE_MAX_BITS 27
2692 +
2693 +#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
2694 +
2695 +#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
2696 +
2697 +#endif
2698 --- /dev/null
2699 +++ b/arch/um/include/uapi/asm/pgtable-3level.h
2700 @@ -0,0 +1,136 @@
2701 +/*
2702 + * Copyright 2003 PathScale Inc
2703 + * Derived from include/asm-i386/pgtable.h
2704 + * Licensed under the GPL
2705 + */
2706 +
2707 +#ifndef __UM_PGTABLE_3LEVEL_H
2708 +#define __UM_PGTABLE_3LEVEL_H
2709 +
2710 +#include <asm-generic/pgtable-nopud.h>
2711 +
2712 +/* PGDIR_SHIFT determines what a third-level page table entry can map */
2713 +
2714 +#ifdef CONFIG_64BIT
2715 +#define PGDIR_SHIFT 30
2716 +#else
2717 +#define PGDIR_SHIFT 31
2718 +#endif
2719 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
2720 +#define PGDIR_MASK (~(PGDIR_SIZE-1))
2721 +
2722 +/* PMD_SHIFT determines the size of the area a second-level page table can
2723 + * map
2724 + */
2725 +
2726 +#define PMD_SHIFT 21
2727 +#define PMD_SIZE (1UL << PMD_SHIFT)
2728 +#define PMD_MASK (~(PMD_SIZE-1))
2729 +
2730 +/*
2731 + * entries per page directory level
2732 + */
2733 +
2734 +#define PTRS_PER_PTE 512
2735 +#ifdef CONFIG_64BIT
2736 +#define PTRS_PER_PMD 512
2737 +#define PTRS_PER_PGD 512
2738 +#else
2739 +#define PTRS_PER_PMD 1024
2740 +#define PTRS_PER_PGD 1024
2741 +#endif
2742 +
2743 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
2744 +#define FIRST_USER_ADDRESS 0
2745 +
2746 +#define pte_ERROR(e) \
2747 + printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2748 + pte_val(e))
2749 +#define pmd_ERROR(e) \
2750 + printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2751 + pmd_val(e))
2752 +#define pgd_ERROR(e) \
2753 + printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2754 + pgd_val(e))
2755 +
2756 +#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
2757 +#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
2758 +#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
2759 +#define pud_populate(mm, pud, pmd) \
2760 + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
2761 +
2762 +#ifdef CONFIG_64BIT
2763 +#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
2764 +#else
2765 +#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
2766 +#endif
2767 +
2768 +static inline int pgd_newpage(pgd_t pgd)
2769 +{
2770 + return(pgd_val(pgd) & _PAGE_NEWPAGE);
2771 +}
2772 +
2773 +static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
2774 +
2775 +#ifdef CONFIG_64BIT
2776 +#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
2777 +#else
2778 +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
2779 +#endif
2780 +
2781 +struct mm_struct;
2782 +extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
2783 +
2784 +static inline void pud_clear (pud_t *pud)
2785 +{
2786 + set_pud(pud, __pud(_PAGE_NEWPAGE));
2787 +}
2788 +
2789 +#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
2790 +#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
2791 +
2792 +/* Find an entry in the second-level page table.. */
2793 +#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
2794 + pmd_index(address))
2795 +
2796 +static inline unsigned long pte_pfn(pte_t pte)
2797 +{
2798 + return phys_to_pfn(pte_val(pte));
2799 +}
2800 +
2801 +static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
2802 +{
2803 + pte_t pte;
2804 + phys_t phys = pfn_to_phys(page_nr);
2805 +
2806 + pte_set_val(pte, phys, pgprot);
2807 + return pte;
2808 +}
2809 +
2810 +static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
2811 +{
2812 + return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
2813 +}
2814 +
2815 +/*
2816 + * Bits 0 through 3 are taken in the low part of the pte,
2817 + * put the 32 bits of offset into the high part.
2818 + */
2819 +#define PTE_FILE_MAX_BITS 32
2820 +
2821 +#ifdef CONFIG_64BIT
2822 +
2823 +#define pte_to_pgoff(p) ((p).pte >> 32)
2824 +
2825 +#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
2826 +
2827 +#else
2828 +
2829 +#define pte_to_pgoff(pte) ((pte).pte_high)
2830 +
2831 +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
2832 +
2833 +#endif
2834 +
2835 +#endif
2836 +
2837 --- /dev/null
2838 +++ b/arch/um/include/uapi/asm/pgtable.h
2839 @@ -0,0 +1,375 @@
2840 +/*
2841 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2842 + * Copyright 2003 PathScale, Inc.
2843 + * Derived from include/asm-i386/pgtable.h
2844 + * Licensed under the GPL
2845 + */
2846 +
2847 +#ifndef __UM_PGTABLE_H
2848 +#define __UM_PGTABLE_H
2849 +
2850 +#include <asm/fixmap.h>
2851 +
2852 +#define _PAGE_PRESENT 0x001
2853 +#define _PAGE_NEWPAGE 0x002
2854 +#define _PAGE_NEWPROT 0x004
2855 +#define _PAGE_RW 0x020
2856 +#define _PAGE_USER 0x040
2857 +#define _PAGE_ACCESSED 0x080
2858 +#define _PAGE_DIRTY 0x100
2859 +/* If _PAGE_PRESENT is clear, we use these: */
2860 +#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
2861 +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
2862 + pte_present gives true */
2863 +
2864 +#ifdef CONFIG_3_LEVEL_PGTABLES
2865 +#include <asm/pgtable-3level.h>
2866 +#else
2867 +#include <asm/pgtable-2level.h>
2868 +#endif
2869 +
2870 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
2871 +
2872 +/* zero page used for uninitialized stuff */
2873 +extern unsigned long *empty_zero_page;
2874 +
2875 +#define pgtable_cache_init() do ; while (0)
2876 +
2877 +/* Just any arbitrary offset to the start of the vmalloc VM area: the
2878 + * current 8MB value just means that there will be an 8MB "hole" after the
2879 + * physical memory until the kernel virtual memory starts. That means that
2880 + * any out-of-bounds memory accesses will hopefully be caught.
2881 + * The vmalloc() routines leave a hole of 4kB between each vmalloced
2882 + * area for the same reason. ;)
2883 + */
2884 +
2885 +extern unsigned long end_iomem;
2886 +
2887 +#define VMALLOC_OFFSET (__va_space)
2888 +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
2889 +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
2890 +#ifdef CONFIG_HIGHMEM
2891 +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
2892 +#else
2893 +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
2894 +#endif
2895 +#define MODULES_VADDR VMALLOC_START
2896 +#define MODULES_END VMALLOC_END
2897 +#define MODULES_LEN (MODULES_VADDR - MODULES_END)
2898 +
2899 +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
2900 +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
2901 +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
2902 +#define __PAGE_KERNEL_EXEC \
2903 + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
2904 +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
2905 +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
2906 +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
2907 +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
2908 +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
2909 +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
2910 +
2911 +/*
2912 + * The i386 can't do page protection for execute, and considers execute the
2913 + * same as read.
2914 + * Also, write permissions imply read permissions. This is the closest we can
2915 + * get..
2916 + */
2917 +#define __P000 PAGE_NONE
2918 +#define __P001 PAGE_READONLY
2919 +#define __P010 PAGE_COPY
2920 +#define __P011 PAGE_COPY
2921 +#define __P100 PAGE_READONLY
2922 +#define __P101 PAGE_READONLY
2923 +#define __P110 PAGE_COPY
2924 +#define __P111 PAGE_COPY
2925 +
2926 +#define __S000 PAGE_NONE
2927 +#define __S001 PAGE_READONLY
2928 +#define __S010 PAGE_SHARED
2929 +#define __S011 PAGE_SHARED
2930 +#define __S100 PAGE_READONLY
2931 +#define __S101 PAGE_READONLY
2932 +#define __S110 PAGE_SHARED
2933 +#define __S111 PAGE_SHARED
2934 +
2935 +/*
2936 + * ZERO_PAGE is a global shared page that is always zero: used
2937 + * for zero-mapped memory areas etc..
2938 + */
2939 +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
2940 +
2941 +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
2942 +
2943 +#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
2944 +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
2945 +
2946 +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
2947 +#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
2948 +
2949 +#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
2950 +#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
2951 +
2952 +#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
2953 +#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
2954 +
2955 +#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
2956 +
2957 +#define pte_page(x) pfn_to_page(pte_pfn(x))
2958 +
2959 +#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
2960 +
2961 +/*
2962 + * =================================
2963 + * Flags checking section.
2964 + * =================================
2965 + */
2966 +
2967 +static inline int pte_none(pte_t pte)
2968 +{
2969 + return pte_is_zero(pte);
2970 +}
2971 +
2972 +/*
2973 + * The following only work if pte_present() is true.
2974 + * Undefined behaviour if not..
2975 + */
2976 +static inline int pte_read(pte_t pte)
2977 +{
2978 + return((pte_get_bits(pte, _PAGE_USER)) &&
2979 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2980 +}
2981 +
2982 +static inline int pte_exec(pte_t pte){
2983 + return((pte_get_bits(pte, _PAGE_USER)) &&
2984 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2985 +}
2986 +
2987 +static inline int pte_write(pte_t pte)
2988 +{
2989 + return((pte_get_bits(pte, _PAGE_RW)) &&
2990 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2991 +}
2992 +
2993 +/*
2994 + * The following only works if pte_present() is not true.
2995 + */
2996 +static inline int pte_file(pte_t pte)
2997 +{
2998 + return pte_get_bits(pte, _PAGE_FILE);
2999 +}
3000 +
3001 +static inline int pte_dirty(pte_t pte)
3002 +{
3003 + return pte_get_bits(pte, _PAGE_DIRTY);
3004 +}
3005 +
3006 +static inline int pte_young(pte_t pte)
3007 +{
3008 + return pte_get_bits(pte, _PAGE_ACCESSED);
3009 +}
3010 +
3011 +static inline int pte_newpage(pte_t pte)
3012 +{
3013 + return pte_get_bits(pte, _PAGE_NEWPAGE);
3014 +}
3015 +
3016 +static inline int pte_newprot(pte_t pte)
3017 +{
3018 + return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
3019 +}
3020 +
3021 +static inline int pte_special(pte_t pte)
3022 +{
3023 + return 0;
3024 +}
3025 +
3026 +/*
3027 + * =================================
3028 + * Flags setting section.
3029 + * =================================
3030 + */
3031 +
3032 +static inline pte_t pte_mknewprot(pte_t pte)
3033 +{
3034 + pte_set_bits(pte, _PAGE_NEWPROT);
3035 + return(pte);
3036 +}
3037 +
3038 +static inline pte_t pte_mkclean(pte_t pte)
3039 +{
3040 + pte_clear_bits(pte, _PAGE_DIRTY);
3041 + return(pte);
3042 +}
3043 +
3044 +static inline pte_t pte_mkold(pte_t pte)
3045 +{
3046 + pte_clear_bits(pte, _PAGE_ACCESSED);
3047 + return(pte);
3048 +}
3049 +
3050 +static inline pte_t pte_wrprotect(pte_t pte)
3051 +{
3052 + pte_clear_bits(pte, _PAGE_RW);
3053 + return(pte_mknewprot(pte));
3054 +}
3055 +
3056 +static inline pte_t pte_mkread(pte_t pte)
3057 +{
3058 + pte_set_bits(pte, _PAGE_USER);
3059 + return(pte_mknewprot(pte));
3060 +}
3061 +
3062 +static inline pte_t pte_mkdirty(pte_t pte)
3063 +{
3064 + pte_set_bits(pte, _PAGE_DIRTY);
3065 + return(pte);
3066 +}
3067 +
3068 +static inline pte_t pte_mkyoung(pte_t pte)
3069 +{
3070 + pte_set_bits(pte, _PAGE_ACCESSED);
3071 + return(pte);
3072 +}
3073 +
3074 +static inline pte_t pte_mkwrite(pte_t pte)
3075 +{
3076 + pte_set_bits(pte, _PAGE_RW);
3077 + return(pte_mknewprot(pte));
3078 +}
3079 +
3080 +static inline pte_t pte_mkuptodate(pte_t pte)
3081 +{
3082 + pte_clear_bits(pte, _PAGE_NEWPAGE);
3083 + if(pte_present(pte))
3084 + pte_clear_bits(pte, _PAGE_NEWPROT);
3085 + return(pte);
3086 +}
3087 +
3088 +static inline pte_t pte_mknewpage(pte_t pte)
3089 +{
3090 + pte_set_bits(pte, _PAGE_NEWPAGE);
3091 + return(pte);
3092 +}
3093 +
3094 +static inline pte_t pte_mkspecial(pte_t pte)
3095 +{
3096 + return(pte);
3097 +}
3098 +
3099 +static inline void set_pte(pte_t *pteptr, pte_t pteval)
3100 +{
3101 + pte_copy(*pteptr, pteval);
3102 +
3103 + /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
3104 + * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
3105 + * mapped pages.
3106 + */
3107 +
3108 + *pteptr = pte_mknewpage(*pteptr);
3109 + if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
3110 +}
3111 +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
3112 +
3113 +#define __HAVE_ARCH_PTE_SAME
3114 +static inline int pte_same(pte_t pte_a, pte_t pte_b)
3115 +{
3116 + return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
3117 +}
3118 +
3119 +/*
3120 + * Conversion functions: convert a page and protection to a page entry,
3121 + * and a page entry and page directory to the page they refer to.
3122 + */
3123 +
3124 +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
3125 +#define __virt_to_page(virt) phys_to_page(__pa(virt))
3126 +#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
3127 +#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
3128 +
3129 +#define mk_pte(page, pgprot) \
3130 + ({ pte_t pte; \
3131 + \
3132 + pte_set_val(pte, page_to_phys(page), (pgprot)); \
3133 + if (pte_present(pte)) \
3134 + pte_mknewprot(pte_mknewpage(pte)); \
3135 + pte;})
3136 +
3137 +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
3138 +{
3139 + pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
3140 + return pte;
3141 +}
3142 +
3143 +/*
3144 + * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
3145 + *
3146 + * this macro returns the index of the entry in the pgd page which would
3147 + * control the given virtual address
3148 + */
3149 +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
3150 +
3151 +/*
3152 + * pgd_offset() returns a (pgd_t *)
3153 + * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
3154 + */
3155 +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
3156 +
3157 +/*
3158 + * a shortcut which implies the use of the kernel's pgd, instead
3159 + * of a process's
3160 + */
3161 +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
3162 +
3163 +/*
3164 + * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
3165 + *
3166 + * this macro returns the index of the entry in the pmd page which would
3167 + * control the given virtual address
3168 + */
3169 +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
3170 +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
3171 +
3172 +#define pmd_page_vaddr(pmd) \
3173 + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
3174 +
3175 +/*
3176 + * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
3177 + *
3178 + * this macro returns the index of the entry in the pte page which would
3179 + * control the given virtual address
3180 + */
3181 +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
3182 +#define pte_offset_kernel(dir, address) \
3183 + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
3184 +#define pte_offset_map(dir, address) \
3185 + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
3186 +#define pte_unmap(pte) do { } while (0)
3187 +
3188 +struct mm_struct;
3189 +extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
3190 +
3191 +#define update_mmu_cache(vma,address,ptep) do ; while (0)
3192 +
3193 +/* Encode and de-code a swap entry */
3194 +#define __swp_type(x) (((x).val >> 5) & 0x1f)
3195 +#define __swp_offset(x) ((x).val >> 11)
3196 +
3197 +#define __swp_entry(type, offset) \
3198 + ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
3199 +#define __pte_to_swp_entry(pte) \
3200 + ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
3201 +#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
3202 +
3203 +#define kern_addr_valid(addr) (1)
3204 +
3205 +#include <asm-generic/pgtable.h>
3206 +
3207 +/* Clear a kernel PTE and flush it from the TLB */
3208 +#define kpte_clear_flush(ptep, vaddr) \
3209 +do { \
3210 + pte_clear(&init_mm, (vaddr), (ptep)); \
3211 + __flush_tlb_one((vaddr)); \
3212 +} while (0)
3213 +
3214 +#endif
3215 --- /dev/null
3216 +++ b/arch/um/include/uapi/asm/processor-generic.h
3217 @@ -0,0 +1,115 @@
3218 +/*
3219 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3220 + * Licensed under the GPL
3221 + */
3222 +
3223 +#ifndef __UM_PROCESSOR_GENERIC_H
3224 +#define __UM_PROCESSOR_GENERIC_H
3225 +
3226 +struct pt_regs;
3227 +
3228 +struct task_struct;
3229 +
3230 +#include <asm/ptrace.h>
3231 +#include <registers.h>
3232 +#include <sysdep/archsetjmp.h>
3233 +
3234 +#include <linux/prefetch.h>
3235 +
3236 +struct mm_struct;
3237 +
3238 +struct thread_struct {
3239 + struct pt_regs regs;
3240 + struct pt_regs *segv_regs;
3241 + int singlestep_syscall;
3242 + void *fault_addr;
3243 + jmp_buf *fault_catcher;
3244 + struct task_struct *prev_sched;
3245 + struct arch_thread arch;
3246 + jmp_buf switch_buf;
3247 + struct {
3248 + int op;
3249 + union {
3250 + struct {
3251 + int pid;
3252 + } fork, exec;
3253 + struct {
3254 + int (*proc)(void *);
3255 + void *arg;
3256 + } thread;
3257 + struct {
3258 + void (*proc)(void *);
3259 + void *arg;
3260 + } cb;
3261 + } u;
3262 + } request;
3263 +};
3264 +
3265 +#define INIT_THREAD \
3266 +{ \
3267 + .regs = EMPTY_REGS, \
3268 + .fault_addr = NULL, \
3269 + .prev_sched = NULL, \
3270 + .arch = INIT_ARCH_THREAD, \
3271 + .request = { 0 } \
3272 +}
3273 +
3274 +static inline void release_thread(struct task_struct *task)
3275 +{
3276 +}
3277 +
3278 +extern unsigned long thread_saved_pc(struct task_struct *t);
3279 +
3280 +static inline void mm_copy_segments(struct mm_struct *from_mm,
3281 + struct mm_struct *new_mm)
3282 +{
3283 +}
3284 +
3285 +#define init_stack (init_thread_union.stack)
3286 +
3287 +/*
3288 + * User space process size: 3GB (default).
3289 + */
3290 +extern unsigned long task_size;
3291 +
3292 +#define TASK_SIZE (task_size)
3293 +
3294 +#undef STACK_TOP
3295 +#undef STACK_TOP_MAX
3296 +
3297 +extern unsigned long stacksizelim;
3298 +
3299 +#define STACK_ROOM (stacksizelim)
3300 +#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
3301 +#define STACK_TOP_MAX STACK_TOP
3302 +
3303 +/* This decides where the kernel will search for a free chunk of vm
3304 + * space during mmap's.
3305 + */
3306 +#define TASK_UNMAPPED_BASE (0x40000000)
3307 +
3308 +extern void start_thread(struct pt_regs *regs, unsigned long entry,
3309 + unsigned long stack);
3310 +
3311 +struct cpuinfo_um {
3312 + unsigned long loops_per_jiffy;
3313 + int ipi_pipe[2];
3314 +};
3315 +
3316 +extern struct cpuinfo_um boot_cpu_data;
3317 +
3318 +#define my_cpu_data cpu_data[smp_processor_id()]
3319 +
3320 +#ifdef CONFIG_SMP
3321 +extern struct cpuinfo_um cpu_data[];
3322 +#define current_cpu_data cpu_data[smp_processor_id()]
3323 +#else
3324 +#define cpu_data (&boot_cpu_data)
3325 +#define current_cpu_data boot_cpu_data
3326 +#endif
3327 +
3328 +
3329 +#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
3330 +extern unsigned long get_wchan(struct task_struct *p);
3331 +
3332 +#endif
3333 --- /dev/null
3334 +++ b/arch/um/include/uapi/asm/ptrace-generic.h
3335 @@ -0,0 +1,45 @@
3336 +/*
3337 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3338 + * Licensed under the GPL
3339 + */
3340 +
3341 +#ifndef __UM_PTRACE_GENERIC_H
3342 +#define __UM_PTRACE_GENERIC_H
3343 +
3344 +#ifndef __ASSEMBLY__
3345 +
3346 +#include <asm/ptrace-abi.h>
3347 +#include <sysdep/ptrace.h>
3348 +
3349 +struct pt_regs {
3350 + struct uml_pt_regs regs;
3351 +};
3352 +
3353 +#define arch_has_single_step() (1)
3354 +
3355 +#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
3356 +
3357 +#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
3358 +#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
3359 +
3360 +#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
3361 +
3362 +#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
3363 +
3364 +#define instruction_pointer(regs) PT_REGS_IP(regs)
3365 +
3366 +struct task_struct;
3367 +
3368 +extern long subarch_ptrace(struct task_struct *child, long request,
3369 + unsigned long addr, unsigned long data);
3370 +extern unsigned long getreg(struct task_struct *child, int regno);
3371 +extern int putreg(struct task_struct *child, int regno, unsigned long value);
3372 +
3373 +extern int arch_copy_tls(struct task_struct *new);
3374 +extern void clear_flushed_tls(struct task_struct *task);
3375 +extern void syscall_trace_enter(struct pt_regs *regs);
3376 +extern void syscall_trace_leave(struct pt_regs *regs);
3377 +
3378 +#endif
3379 +
3380 +#endif
3381 --- /dev/null
3382 +++ b/arch/um/include/uapi/asm/setup.h
3383 @@ -0,0 +1,10 @@
3384 +#ifndef SETUP_H_INCLUDED
3385 +#define SETUP_H_INCLUDED
3386 +
3387 +/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
3388 + * command line, so this choice is ok.
3389 + */
3390 +
3391 +#define COMMAND_LINE_SIZE 4096
3392 +
3393 +#endif /* SETUP_H_INCLUDED */
3394 --- /dev/null
3395 +++ b/arch/um/include/uapi/asm/smp.h
3396 @@ -0,0 +1,32 @@
3397 +#ifndef __UM_SMP_H
3398 +#define __UM_SMP_H
3399 +
3400 +#ifdef CONFIG_SMP
3401 +
3402 +#include <linux/bitops.h>
3403 +#include <asm/current.h>
3404 +#include <linux/cpumask.h>
3405 +
3406 +#define raw_smp_processor_id() (current_thread->cpu)
3407 +
3408 +#define cpu_logical_map(n) (n)
3409 +#define cpu_number_map(n) (n)
3410 +extern int hard_smp_processor_id(void);
3411 +#define NO_PROC_ID -1
3412 +
3413 +extern int ncpus;
3414 +
3415 +
3416 +static inline void smp_cpus_done(unsigned int maxcpus)
3417 +{
3418 +}
3419 +
3420 +extern struct task_struct *idle_threads[NR_CPUS];
3421 +
3422 +#else
3423 +
3424 +#define hard_smp_processor_id() 0
3425 +
3426 +#endif
3427 +
3428 +#endif
3429 --- /dev/null
3430 +++ b/arch/um/include/uapi/asm/stacktrace.h
3431 @@ -0,0 +1,42 @@
3432 +#ifndef _ASM_UML_STACKTRACE_H
3433 +#define _ASM_UML_STACKTRACE_H
3434 +
3435 +#include <linux/uaccess.h>
3436 +#include <linux/ptrace.h>
3437 +
3438 +struct stack_frame {
3439 + struct stack_frame *next_frame;
3440 + unsigned long return_address;
3441 +};
3442 +
3443 +struct stacktrace_ops {
3444 + void (*address)(void *data, unsigned long address, int reliable);
3445 +};
3446 +
3447 +#ifdef CONFIG_FRAME_POINTER
3448 +static inline unsigned long
3449 +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
3450 +{
3451 + if (!task || task == current)
3452 + return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
3453 + return KSTK_EBP(task);
3454 +}
3455 +#else
3456 +static inline unsigned long
3457 +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
3458 +{
3459 + return 0;
3460 +}
3461 +#endif
3462 +
3463 +static inline unsigned long
3464 +*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
3465 +{
3466 + if (!task || task == current)
3467 + return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
3468 + return (unsigned long *)KSTK_ESP(task);
3469 +}
3470 +
3471 +void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
3472 +
3473 +#endif /* _ASM_UML_STACKTRACE_H */
3474 --- /dev/null
3475 +++ b/arch/um/include/uapi/asm/sysrq.h
3476 @@ -0,0 +1,7 @@
3477 +#ifndef __UM_SYSRQ_H
3478 +#define __UM_SYSRQ_H
3479 +
3480 +struct task_struct;
3481 +extern void show_trace(struct task_struct* task, unsigned long *stack);
3482 +
3483 +#endif
3484 --- /dev/null
3485 +++ b/arch/um/include/uapi/asm/thread_info.h
3486 @@ -0,0 +1,78 @@
3487 +/*
3488 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3489 + * Licensed under the GPL
3490 + */
3491 +
3492 +#ifndef __UM_THREAD_INFO_H
3493 +#define __UM_THREAD_INFO_H
3494 +
3495 +#ifndef __ASSEMBLY__
3496 +
3497 +#include <asm/types.h>
3498 +#include <asm/page.h>
3499 +#include <asm/uaccess.h>
3500 +
3501 +struct thread_info {
3502 + struct task_struct *task; /* main task structure */
3503 + struct exec_domain *exec_domain; /* execution domain */
3504 + unsigned long flags; /* low level flags */
3505 + __u32 cpu; /* current CPU */
3506 + int preempt_count; /* 0 => preemptable,
3507 + <0 => BUG */
3508 + mm_segment_t addr_limit; /* thread address space:
3509 + 0-0xBFFFFFFF for user
3510 + 0-0xFFFFFFFF for kernel */
3511 + struct restart_block restart_block;
3512 + struct thread_info *real_thread; /* Points to non-IRQ stack */
3513 +};
3514 +
3515 +#define INIT_THREAD_INFO(tsk) \
3516 +{ \
3517 + .task = &tsk, \
3518 + .exec_domain = &default_exec_domain, \
3519 + .flags = 0, \
3520 + .cpu = 0, \
3521 + .preempt_count = INIT_PREEMPT_COUNT, \
3522 + .addr_limit = KERNEL_DS, \
3523 + .restart_block = { \
3524 + .fn = do_no_restart_syscall, \
3525 + }, \
3526 + .real_thread = NULL, \
3527 +}
3528 +
3529 +#define init_thread_info (init_thread_union.thread_info)
3530 +#define init_stack (init_thread_union.stack)
3531 +
3532 +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
3533 +/* how to get the thread information struct from C */
3534 +static inline struct thread_info *current_thread_info(void)
3535 +{
3536 + struct thread_info *ti;
3537 + unsigned long mask = THREAD_SIZE - 1;
3538 + void *p;
3539 +
3540 + asm volatile ("" : "=r" (p) : "0" (&ti));
3541 + ti = (struct thread_info *) (((unsigned long)p) & ~mask);
3542 + return ti;
3543 +}
3544 +
3545 +#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
3546 +
3547 +#endif
3548 +
3549 +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
3550 +#define TIF_SIGPENDING 1 /* signal pending */
3551 +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
3552 +#define TIF_RESTART_BLOCK 4
3553 +#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
3554 +#define TIF_SYSCALL_AUDIT 6
3555 +#define TIF_RESTORE_SIGMASK 7
3556 +#define TIF_NOTIFY_RESUME 8
3557 +
3558 +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
3559 +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
3560 +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
3561 +#define _TIF_MEMDIE (1 << TIF_MEMDIE)
3562 +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
3563 +
3564 +#endif
3565 --- /dev/null
3566 +++ b/arch/um/include/uapi/asm/timex.h
3567 @@ -0,0 +1,13 @@
3568 +#ifndef __UM_TIMEX_H
3569 +#define __UM_TIMEX_H
3570 +
3571 +typedef unsigned long cycles_t;
3572 +
3573 +static inline cycles_t get_cycles (void)
3574 +{
3575 + return 0;
3576 +}
3577 +
3578 +#define CLOCK_TICK_RATE (HZ)
3579 +
3580 +#endif
3581 --- /dev/null
3582 +++ b/arch/um/include/uapi/asm/tlb.h
3583 @@ -0,0 +1,134 @@
3584 +#ifndef __UM_TLB_H
3585 +#define __UM_TLB_H
3586 +
3587 +#include <linux/pagemap.h>
3588 +#include <linux/swap.h>
3589 +#include <asm/percpu.h>
3590 +#include <asm/pgalloc.h>
3591 +#include <asm/tlbflush.h>
3592 +
3593 +#define tlb_start_vma(tlb, vma) do { } while (0)
3594 +#define tlb_end_vma(tlb, vma) do { } while (0)
3595 +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
3596 +
3597 +/* struct mmu_gather is an opaque type used by the mm code for passing around
3598 + * any data needed by arch specific code for tlb_remove_page.
3599 + */
3600 +struct mmu_gather {
3601 + struct mm_struct *mm;
3602 + unsigned int need_flush; /* Really unmapped some ptes? */
3603 + unsigned long start;
3604 + unsigned long end;
3605 + unsigned int fullmm; /* non-zero means full mm flush */
3606 +};
3607 +
3608 +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
3609 + unsigned long address)
3610 +{
3611 + if (tlb->start > address)
3612 + tlb->start = address;
3613 + if (tlb->end < address + PAGE_SIZE)
3614 + tlb->end = address + PAGE_SIZE;
3615 +}
3616 +
3617 +static inline void init_tlb_gather(struct mmu_gather *tlb)
3618 +{
3619 + tlb->need_flush = 0;
3620 +
3621 + tlb->start = TASK_SIZE;
3622 + tlb->end = 0;
3623 +
3624 + if (tlb->fullmm) {
3625 + tlb->start = 0;
3626 + tlb->end = TASK_SIZE;
3627 + }
3628 +}
3629 +
3630 +static inline void
3631 +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
3632 +{
3633 + tlb->mm = mm;
3634 + tlb->start = start;
3635 + tlb->end = end;
3636 + tlb->fullmm = !(start | (end+1));
3637 +
3638 + init_tlb_gather(tlb);
3639 +}
3640 +
3641 +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
3642 + unsigned long end);
3643 +
3644 +static inline void
3645 +tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
3646 +{
3647 + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
3648 +}
3649 +
3650 +static inline void
3651 +tlb_flush_mmu_free(struct mmu_gather *tlb)
3652 +{
3653 + init_tlb_gather(tlb);
3654 +}
3655 +
3656 +static inline void
3657 +tlb_flush_mmu(struct mmu_gather *tlb)
3658 +{
3659 + if (!tlb->need_flush)
3660 + return;
3661 +
3662 + tlb_flush_mmu_tlbonly(tlb);
3663 + tlb_flush_mmu_free(tlb);
3664 +}
3665 +
3666 +/* tlb_finish_mmu
3667 + * Called at the end of the shootdown operation to free up any resources
3668 + * that were required.
3669 + */
3670 +static inline void
3671 +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
3672 +{
3673 + tlb_flush_mmu(tlb);
3674 +
3675 + /* keep the page table cache within bounds */
3676 + check_pgt_cache();
3677 +}
3678 +
3679 +/* tlb_remove_page
3680 + * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
3681 + * while handling the additional races in SMP caused by other CPUs
3682 + * caching valid mappings in their TLBs.
3683 + */
3684 +static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
3685 +{
3686 + tlb->need_flush = 1;
3687 + free_page_and_swap_cache(page);
3688 + return 1; /* avoid calling tlb_flush_mmu */
3689 +}
3690 +
3691 +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
3692 +{
3693 + __tlb_remove_page(tlb, page);
3694 +}
3695 +
3696 +/**
3697 + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
3698 + *
3699 + * Record the fact that pte's were really unmapped in ->need_flush, so we can
3700 + * later optimise away the tlb invalidate. This helps when userspace is
3701 + * unmapping already-unmapped pages, which happens quite a lot.
3702 + */
3703 +#define tlb_remove_tlb_entry(tlb, ptep, address) \
3704 + do { \
3705 + tlb->need_flush = 1; \
3706 + __tlb_remove_tlb_entry(tlb, ptep, address); \
3707 + } while (0)
3708 +
3709 +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
3710 +
3711 +#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
3712 +
3713 +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
3714 +
3715 +#define tlb_migrate_finish(mm) do {} while (0)
3716 +
3717 +#endif
3718 --- /dev/null
3719 +++ b/arch/um/include/uapi/asm/tlbflush.h
3720 @@ -0,0 +1,31 @@
3721 +/*
3722 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3723 + * Licensed under the GPL
3724 + */
3725 +
3726 +#ifndef __UM_TLBFLUSH_H
3727 +#define __UM_TLBFLUSH_H
3728 +
3729 +#include <linux/mm.h>
3730 +
3731 +/*
3732 + * TLB flushing:
3733 + *
3734 + * - flush_tlb() flushes the current mm struct TLBs
3735 + * - flush_tlb_all() flushes all processes TLBs
3736 + * - flush_tlb_mm(mm) flushes the specified mm context TLB's
3737 + * - flush_tlb_page(vma, vmaddr) flushes one page
3738 + * - flush_tlb_kernel_vm() flushes the kernel vm area
3739 + * - flush_tlb_range(vma, start, end) flushes a range of pages
3740 + */
3741 +
3742 +extern void flush_tlb_all(void);
3743 +extern void flush_tlb_mm(struct mm_struct *mm);
3744 +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
3745 + unsigned long end);
3746 +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
3747 +extern void flush_tlb_kernel_vm(void);
3748 +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
3749 +extern void __flush_tlb_one(unsigned long addr);
3750 +
3751 +#endif
3752 --- /dev/null
3753 +++ b/arch/um/include/uapi/asm/uaccess.h
3754 @@ -0,0 +1,178 @@
3755 +/*
3756 + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3757 + * Licensed under the GPL
3758 + */
3759 +
3760 +#ifndef __UM_UACCESS_H
3761 +#define __UM_UACCESS_H
3762 +
3763 +/* thread_info has a mm_segment_t in it, so put the definition up here */
3764 +typedef struct {
3765 + unsigned long seg;
3766 +} mm_segment_t;
3767 +
3768 +#include <linux/thread_info.h>
3769 +#include <linux/errno.h>
3770 +#include <asm/processor.h>
3771 +#include <asm/elf.h>
3772 +
3773 +#define VERIFY_READ 0
3774 +#define VERIFY_WRITE 1
3775 +
3776 +/*
3777 + * The fs value determines whether argument validity checking should be
3778 + * performed or not. If get_fs() == USER_DS, checking is performed; with
3779 + * get_fs() == KERNEL_DS, checking is bypassed.
3780 + *
3781 + * For historical reasons, these macros are grossly misnamed.
3782 + */
3783 +
3784 +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
3785 +
3786 +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
3787 +#define USER_DS MAKE_MM_SEG(TASK_SIZE)
3788 +
3789 +#define get_ds() (KERNEL_DS)
3790 +#define get_fs() (current_thread_info()->addr_limit)
3791 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
3792 +
3793 +#define segment_eq(a, b) ((a).seg == (b).seg)
3794 +
3795 +#define __under_task_size(addr, size) \
3796 + (((unsigned long) (addr) < TASK_SIZE) && \
3797 + (((unsigned long) (addr) + (size)) < TASK_SIZE))
3798 +
3799 +#define __access_ok_vsyscall(type, addr, size) \
3800 + ((type == VERIFY_READ) && \
3801 + ((unsigned long) (addr) >= FIXADDR_USER_START) && \
3802 + ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
3803 + ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
3804 +
3805 +#define __addr_range_nowrap(addr, size) \
3806 + ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
3807 +
3808 +#define access_ok(type, addr, size) \
3809 + (__addr_range_nowrap(addr, size) && \
3810 + (__under_task_size(addr, size) || \
3811 + __access_ok_vsyscall(type, addr, size) || \
3812 + segment_eq(get_fs(), KERNEL_DS)))
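
A minimal sketch of the classic set_fs() pattern these definitions enable: temporarily switch to KERNEL_DS so access_ok() (and therefore copy_{to,from}_user()) accepts a kernel address, then restore the old limit. The helper name is hypothetical; vfs_read() merely stands in for any API that expects a __user pointer.

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_read_into_kernel_buf(struct file *file, char *kbuf,
					    size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);		/* access_ok() now passes for kbuf */
	ret = vfs_read(file, (char __user *)kbuf, len, pos);
	set_fs(old_fs);			/* always restore the previous limit */

	return ret;
}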
3813 +
3814 +extern int copy_from_user(void *to, const void __user *from, int n);
3815 +extern int copy_to_user(void __user *to, const void *from, int n);
3816 +
3817 +/*
3818 + * strncpy_from_user: - Copy a NUL terminated string from userspace.
3819 + * @dst: Destination address, in kernel space. This buffer must be at
3820 + * least @count bytes long.
3821 + * @src: Source address, in user space.
3822 + * @count: Maximum number of bytes to copy, including the trailing NUL.
3823 + *
3824 + * Copies a NUL-terminated string from userspace to kernel space.
3825 + *
3826 + * On success, returns the length of the string (not including the trailing
3827 + * NUL).
3828 + *
3829 + * If access to userspace fails, returns -EFAULT (some data may have been
3830 + * copied).
3831 + *
3832 + * If @count is smaller than the length of the string, copies @count bytes
3833 + * and returns @count.
3834 + */
3835 +
3836 +extern int strncpy_from_user(char *dst, const char __user *src, int count);
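
A short usage sketch (hypothetical handler and buffer size) showing how the return value distinguishes a fault from truncation:

#include <linux/errno.h>
#include <linux/uaccess.h>

static int example_copy_name(const char __user *uname)
{
	char name[32];
	int len = strncpy_from_user(name, uname, sizeof(name));

	if (len < 0)
		return len;		/* -EFAULT: bad user pointer */
	if (len == sizeof(name))
		return -ENAMETOOLONG;	/* no NUL found within the buffer */

	/* name[] now holds a NUL-terminated string of len bytes. */
	return 0;
}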
3837 +
3838 +/*
3839 + * __clear_user: - Zero a block of memory in user space, with less checking.
3840 + * @mem: Destination address, in user space.
3841 + * @len: Number of bytes to zero.
3842 + *
3843 + * Zero a block of memory in user space. Caller must check
3844 + * the specified block with access_ok() before calling this function.
3845 + *
3846 + * Returns number of bytes that could not be cleared.
3847 + * On success, this will be zero.
3848 + */
3849 +extern int __clear_user(void __user *mem, int len);
3850 +
3851 +/*
3852 + * clear_user: - Zero a block of memory in user space.
3853 + * @mem: Destination address, in user space.
3854 + * @len: Number of bytes to zero.
3855 + *
3856 + * Zero a block of memory in user space.
3857 + *
3858 + * Returns number of bytes that could not be cleared.
3859 + * On success, this will be zero.
3860 + */
3861 +extern int clear_user(void __user *mem, int len);
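
As a usage sketch (names hypothetical), a read()-style handler padding the unread tail of a user buffer with zeros:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static ssize_t example_pad_tail(char __user *buf, size_t copied, size_t requested)
{
	/* clear_user() returns the number of bytes it could NOT zero. */
	if (copied < requested &&
	    clear_user(buf + copied, requested - copied))
		return -EFAULT;

	return copied;
}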
3862 +
3863 +/*
3864 + * strnlen_user: - Get the size of a string in user space.
3865 + * @str: The string to measure.
3866 + * @len: The maximum valid length
3867 + *
3868 + * Get the size of a NUL-terminated string in user space.
3869 + *
3870 + * Returns the size of the string INCLUDING the terminating NUL.
3871 + * On exception, returns 0.
3872 + * If the string is too long, returns a value greater than @len.
3873 + */
3874 +extern int strnlen_user(const void __user *str, int len);
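
A sketch of the usual pattern (hypothetical helper, similar in spirit to the kernel's strndup_user()): size the string first, then copy it, remembering that the return value includes the trailing NUL and that 0 means the access faulted.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static char *example_dup_user_string(const char __user *ustr, int max)
{
	int size = strnlen_user(ustr, max);
	char *kstr;

	if (size == 0)
		return ERR_PTR(-EFAULT);	/* faulted while probing */
	if (size > max)
		return ERR_PTR(-EINVAL);	/* no NUL within max bytes */

	kstr = kmalloc(size, GFP_KERNEL);
	if (!kstr)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(kstr, ustr, size)) {
		kfree(kstr);
		return ERR_PTR(-EFAULT);
	}
	kstr[size - 1] = '\0';	/* guard against a concurrent writer */
	return kstr;
}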
3875 +
3876 +#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
3877 +
3878 +#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
3879 +
3880 +#define __copy_to_user_inatomic __copy_to_user
3881 +#define __copy_from_user_inatomic __copy_from_user
3882 +
3883 +#define __get_user(x, ptr) \
3884 +({ \
3885 + const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
3886 + __typeof__(x) __private_val; \
3887 + int __private_ret = -EFAULT; \
3888 + (x) = (__typeof__(*(__private_ptr)))0; \
3889 + if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
3890 + sizeof(*(__private_ptr))) == 0) { \
3891 + (x) = (__typeof__(*(__private_ptr))) __private_val; \
3892 + __private_ret = 0; \
3893 + } \
3894 + __private_ret; \
3895 +})
3896 +
3897 +#define get_user(x, ptr) \
3898 +({ \
3899 + const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
3900 + (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
3901 + __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
3902 +})
3903 +
3904 +#define __put_user(x, ptr) \
3905 +({ \
3906 + __typeof__(*(ptr)) __user *__private_ptr = ptr; \
3907 + __typeof__(*(__private_ptr)) __private_val; \
3908 + int __private_ret = -EFAULT; \
3909 + __private_val = (__typeof__(*(__private_ptr))) (x); \
3910 + if (__copy_to_user((__private_ptr), &__private_val, \
3911 + sizeof(*(__private_ptr))) == 0) { \
3912 + __private_ret = 0; \
3913 + } \
3914 + __private_ret; \
3915 +})
3916 +
3917 +#define put_user(x, ptr) \
3918 +({ \
3919 + __typeof__(*(ptr)) __user *private_ptr = (ptr); \
3920 + (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
3921 + __put_user(x, private_ptr) : -EFAULT); \
3922 +})
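
Finally, a usage sketch (hypothetical ioctl-style helper) for the get_user()/put_user() pair; both evaluate to 0 on success and -EFAULT when access_ok() or the underlying copy fails:

#include <linux/errno.h>
#include <linux/uaccess.h>

static long example_double_in_place(int __user *uarg)
{
	int val;

	if (get_user(val, uarg))
		return -EFAULT;

	val *= 2;

	if (put_user(val, uarg))
		return -EFAULT;

	return 0;
}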
3923 +
3924 +#define strlen_user(str) strnlen_user(str, ~0U >> 1)
3925 +
3926 +struct exception_table_entry
3927 +{
3928 + unsigned long insn;
3929 + unsigned long fixup;
3930 +};
3931 +
3932 +#endif