target/linux/uml/patches-3.14/001-fix_make_headers_install.patch

From faec6b6c2cc0219e74569c13f581fc11d8f3fc57 Mon Sep 17 00:00:00 2001
From: Florian Fainelli <florian@openwrt.org>
Date: Sun, 17 Mar 2013 20:12:10 +0100
Subject: [PATCH] UM: fix make headers_install after UAPI header installation

Commit 10b63956 (UAPI: Plumb the UAPI Kbuilds into the user
header installation and checking) breaks UML make headers_install with
the following:

$ ARCH=um make headers_install
CHK include/generated/uapi/linux/version.h
UPD include/generated/uapi/linux/version.h
HOSTCC scripts/basic/fixdep
WRAP arch/um/include/generated/asm/bug.h
[snip]
WRAP arch/um/include/generated/asm/trace_clock.h
SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_32.h
SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_64.h
SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_x32.h
SYSTBL arch/x86/syscalls/../include/generated/asm/syscalls_32.h
HOSTCC scripts/unifdef
Makefile:912: *** Headers not exportable for the um architecture. Stop.
zsh: exit 2 ARCH=um make headers_install

The reason is that the top-level Makefile does the following:
$(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
$(error Headers not exportable for the $(SRCARCH) architecture))

We end up in the $(error ...) branch of the $(if) because UML still uses
the old path, arch/um/include/asm/Kbuild. This patch fixes the issue by
moving the header files to arch/um/include/uapi/asm/, thus making
headers_install (and the other make targets that check for uapi) succeed.

Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
Richard, this has been broken from 3.7 onwards; if you want me to send
you separate patches for 3.7 and 3.8, let me know. Thanks!

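For reference, the rename below can be reproduced and sanity-checked
with something like this (illustrative commands, not part of the patch
itself, assuming a 3.x kernel tree):

$ mkdir -p arch/um/include/uapi
$ git mv arch/um/include/asm arch/um/include/uapi/asm
$ ARCH=um make headers_install

With the Kbuild file present under arch/um/include/uapi/asm/, the
wildcard check quoted above should now succeed.
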
 arch/um/include/{ => uapi}/asm/Kbuild | 0
 arch/um/include/{ => uapi}/asm/a.out-core.h | 0
 arch/um/include/{ => uapi}/asm/bugs.h | 0
 arch/um/include/{ => uapi}/asm/cache.h | 0
 arch/um/include/{ => uapi}/asm/common.lds.S | 0
 arch/um/include/{ => uapi}/asm/dma.h | 0
 arch/um/include/{ => uapi}/asm/fixmap.h | 0
 arch/um/include/{ => uapi}/asm/irq.h | 0
 arch/um/include/{ => uapi}/asm/irqflags.h | 0
 arch/um/include/{ => uapi}/asm/kmap_types.h | 0
 arch/um/include/{ => uapi}/asm/kvm_para.h | 0
 arch/um/include/{ => uapi}/asm/mmu.h | 0
 arch/um/include/{ => uapi}/asm/mmu_context.h | 0
 arch/um/include/{ => uapi}/asm/page.h | 0
 arch/um/include/{ => uapi}/asm/pgalloc.h | 0
 arch/um/include/{ => uapi}/asm/pgtable-2level.h | 0
 arch/um/include/{ => uapi}/asm/pgtable-3level.h | 0
 arch/um/include/{ => uapi}/asm/pgtable.h | 0
 arch/um/include/{ => uapi}/asm/processor-generic.h | 0
 arch/um/include/{ => uapi}/asm/ptrace-generic.h | 0
 arch/um/include/{ => uapi}/asm/setup.h | 0
 arch/um/include/{ => uapi}/asm/smp.h | 0
 arch/um/include/{ => uapi}/asm/sysrq.h | 0
 arch/um/include/{ => uapi}/asm/thread_info.h | 0
 arch/um/include/{ => uapi}/asm/timex.h | 0
 arch/um/include/{ => uapi}/asm/tlb.h | 0
 arch/um/include/{ => uapi}/asm/tlbflush.h | 0
 arch/um/include/{ => uapi}/asm/uaccess.h | 0
 28 files changed, 0 insertions(+), 0 deletions(-)
 rename arch/um/include/{ => uapi}/asm/Kbuild (100%)
 rename arch/um/include/{ => uapi}/asm/a.out-core.h (100%)
 rename arch/um/include/{ => uapi}/asm/bugs.h (100%)
 rename arch/um/include/{ => uapi}/asm/cache.h (100%)
 rename arch/um/include/{ => uapi}/asm/common.lds.S (100%)
 rename arch/um/include/{ => uapi}/asm/dma.h (100%)
 rename arch/um/include/{ => uapi}/asm/fixmap.h (100%)
 rename arch/um/include/{ => uapi}/asm/irq.h (100%)
 rename arch/um/include/{ => uapi}/asm/irqflags.h (100%)
 rename arch/um/include/{ => uapi}/asm/kmap_types.h (100%)
 rename arch/um/include/{ => uapi}/asm/kvm_para.h (100%)
 rename arch/um/include/{ => uapi}/asm/mmu.h (100%)
 rename arch/um/include/{ => uapi}/asm/mmu_context.h (100%)
 rename arch/um/include/{ => uapi}/asm/page.h (100%)
 rename arch/um/include/{ => uapi}/asm/pgalloc.h (100%)
 rename arch/um/include/{ => uapi}/asm/pgtable-2level.h (100%)
 rename arch/um/include/{ => uapi}/asm/pgtable-3level.h (100%)
 rename arch/um/include/{ => uapi}/asm/pgtable.h (100%)
 rename arch/um/include/{ => uapi}/asm/processor-generic.h (100%)
 rename arch/um/include/{ => uapi}/asm/ptrace-generic.h (100%)
 rename arch/um/include/{ => uapi}/asm/setup.h (100%)
 rename arch/um/include/{ => uapi}/asm/smp.h (100%)
 rename arch/um/include/{ => uapi}/asm/sysrq.h (100%)
 rename arch/um/include/{ => uapi}/asm/thread_info.h (100%)
 rename arch/um/include/{ => uapi}/asm/timex.h (100%)
 rename arch/um/include/{ => uapi}/asm/tlb.h (100%)
 rename arch/um/include/{ => uapi}/asm/tlbflush.h (100%)
 rename arch/um/include/{ => uapi}/asm/uaccess.h (100%)

--- a/arch/um/include/asm/Kbuild
+++ /dev/null
@@ -1,8 +0,0 @@
-generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
-generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
-generic-y += switch_to.h clkdev.h
-generic-y += trace_clock.h
-generic-y += preempt.h
-generic-y += hash.h
-generic-y += barrier.h
--- a/arch/um/include/asm/a.out-core.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef __UM_A_OUT_CORE_H
-#define __UM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
-{
-}
-
-#endif /* __KERNEL__ */
-#endif /* __UM_A_OUT_CORE_H */
--- a/arch/um/include/asm/bugs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_BUGS_H
-#define __UM_BUGS_H
-
-void check_bugs(void);
-
-#endif
--- a/arch/um/include/asm/cache.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __UM_CACHE_H
-#define __UM_CACHE_H
-
-
-#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
-# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
-#elif defined(CONFIG_UML_X86) /* 64-bit */
-# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
-#else
-/* XXX: this was taken from x86, now it's completely random. Luckily only
- * affects SMP padding. */
-# define L1_CACHE_SHIFT 5
-#endif
-
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#endif
--- a/arch/um/include/asm/common.lds.S
+++ /dev/null
@@ -1,107 +0,0 @@
-#include <asm-generic/vmlinux.lds.h>
-
- .fini : { *(.fini) } =0x9090
- _etext = .;
- PROVIDE (etext = .);
-
- . = ALIGN(4096);
- _sdata = .;
- PROVIDE (sdata = .);
-
- RODATA
-
- .unprotected : { *(.unprotected) }
- . = ALIGN(4096);
- PROVIDE (_unprotected_end = .);
-
- . = ALIGN(4096);
- .note : { *(.note.*) }
- EXCEPTION_TABLE(0)
-
- BUG_TABLE
-
- .uml.setup.init : {
- __uml_setup_start = .;
- *(.uml.setup.init)
- __uml_setup_end = .;
- }
-
- .uml.help.init : {
- __uml_help_start = .;
- *(.uml.help.init)
- __uml_help_end = .;
- }
-
- .uml.postsetup.init : {
- __uml_postsetup_start = .;
- *(.uml.postsetup.init)
- __uml_postsetup_end = .;
- }
-
- .init.setup : {
- INIT_SETUP(0)
- }
-
- PERCPU_SECTION(32)
-
- .initcall.init : {
- INIT_CALLS
- }
-
- .con_initcall.init : {
- CON_INITCALL
- }
-
- .uml.initcall.init : {
- __uml_initcall_start = .;
- *(.uml.initcall.init)
- __uml_initcall_end = .;
- }
-
- SECURITY_INIT
-
- .exitcall : {
- __exitcall_begin = .;
- *(.exitcall.exit)
- __exitcall_end = .;
- }
-
- .uml.exitcall : {
- __uml_exitcall_begin = .;
- *(.uml.exitcall.exit)
- __uml_exitcall_end = .;
- }
-
- . = ALIGN(4);
- .altinstructions : {
- __alt_instructions = .;
- *(.altinstructions)
- __alt_instructions_end = .;
- }
- .altinstr_replacement : { *(.altinstr_replacement) }
- /* .exit.text is discard at runtime, not link time, to deal with references
- from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
-
- .preinit_array : {
- __preinit_array_start = .;
- *(.preinit_array)
- __preinit_array_end = .;
- }
- .init_array : {
- __init_array_start = .;
- *(.init_array)
- __init_array_end = .;
- }
- .fini_array : {
- __fini_array_start = .;
- *(.fini_array)
- __fini_array_end = .;
- }
-
- . = ALIGN(4096);
- .init.ramfs : {
- INIT_RAM_FS
- }
-
--- a/arch/um/include/asm/dma.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __UM_DMA_H
-#define __UM_DMA_H
-
-#include <asm/io.h>
-
-extern unsigned long uml_physmem;
-
-#define MAX_DMA_ADDRESS (uml_physmem)
-
-#endif
--- a/arch/um/include/asm/fixmap.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef __UM_FIXMAP_H
-#define __UM_FIXMAP_H
-
-#include <asm/processor.h>
-#include <asm/kmap_types.h>
-#include <asm/archparam.h>
-#include <asm/page.h>
-#include <linux/threads.h>
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * highger than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-/*
- * on UP currently we will have no trace of the fixmap mechanizm,
- * no page table allocations, etc. This might change in the
- * future, say framebuffers for the console driver(s) could be
- * fix-mapped?
- */
-enum fixed_addresses {
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
- __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap, and leave one page empty
- * at the top of mem..
- */
-
-#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
-#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-#include <asm-generic/fixmap.h>
-
-#endif
--- a/arch/um/include/asm/irq.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __UM_IRQ_H
-#define __UM_IRQ_H
-
-#define TIMER_IRQ 0
-#define UMN_IRQ 1
-#define CONSOLE_IRQ 2
-#define CONSOLE_WRITE_IRQ 3
-#define UBD_IRQ 4
-#define UM_ETH_IRQ 5
-#define SSL_IRQ 6
-#define SSL_WRITE_IRQ 7
-#define ACCEPT_IRQ 8
-#define MCONSOLE_IRQ 9
-#define WINCH_IRQ 10
-#define SIGIO_WRITE_IRQ 11
-#define TELNETD_IRQ 12
-#define XTERM_IRQ 13
-#define RANDOM_IRQ 14
-
-#define LAST_IRQ RANDOM_IRQ
-#define NR_IRQS (LAST_IRQ + 1)
-
-#endif
--- a/arch/um/include/asm/irqflags.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __UM_IRQFLAGS_H
-#define __UM_IRQFLAGS_H
-
-extern int get_signals(void);
-extern int set_signals(int enable);
-extern void block_signals(void);
-extern void unblock_signals(void);
-
-static inline unsigned long arch_local_save_flags(void)
-{
- return get_signals();
-}
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
- set_signals(flags);
-}
-
-static inline void arch_local_irq_enable(void)
-{
- unblock_signals();
-}
-
-static inline void arch_local_irq_disable(void)
-{
- block_signals();
-}
-
-static inline unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
- flags = arch_local_save_flags();
- arch_local_irq_disable();
- return flags;
-}
-
-static inline bool arch_irqs_disabled(void)
-{
- return arch_local_save_flags() == 0;
-}
-
-#endif
--- a/arch/um/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_KMAP_TYPES_H
-#define __UM_KMAP_TYPES_H
-
-/* No more #include "asm/arch/kmap_types.h" ! */
-
-#define KM_TYPE_NR 14
-
-#endif
--- a/arch/um/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
--- a/arch/um/include/asm/mmu.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __ARCH_UM_MMU_H
-#define __ARCH_UM_MMU_H
-
-#include <mm_id.h>
-#include <asm/mm_context.h>
-
-typedef struct mm_context {
- struct mm_id id;
- struct uml_arch_mm_context arch;
- struct page *stub_pages[2];
-} mm_context_t;
-
-extern void __switch_mm(struct mm_id * mm_idp);
-
-/* Avoid tangled inclusion with asm/ldt.h */
-extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
-extern void free_ldt(struct mm_context *mm);
-
-#endif
--- a/arch/um/include/asm/mmu_context.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_MMU_CONTEXT_H
-#define __UM_MMU_CONTEXT_H
-
-#include <linux/sched.h>
-#include <asm/mmu.h>
-
-extern void uml_setup_stubs(struct mm_struct *mm);
-extern void arch_exit_mmap(struct mm_struct *mm);
-
-#define deactivate_mm(tsk,mm) do { } while (0)
-
-extern void force_flush_all(void);
-
-static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
-{
- /*
- * This is called by fs/exec.c and sys_unshare()
- * when the new ->mm is used for the first time.
- */
- __switch_mm(&new->context.id);
- down_write(&new->mmap_sem);
- uml_setup_stubs(new);
- up_write(&new->mmap_sem);
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
-{
- unsigned cpu = smp_processor_id();
-
- if(prev != next){
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- cpumask_set_cpu(cpu, mm_cpumask(next));
- if(next != &init_mm)
- __switch_mm(&next->context.id);
- }
-}
-
-static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-{
- uml_setup_stubs(mm);
-}
-
-static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *tsk)
-{
-}
-
-extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
-
-extern void destroy_context(struct mm_struct *mm);
-
-#endif
--- a/arch/um/include/asm/page.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
- * Copyright 2003 PathScale, Inc.
- * Licensed under the GPL
- */
-
-#ifndef __UM_PAGE_H
-#define __UM_PAGE_H
-
-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#ifndef __ASSEMBLY__
-
-struct page;
-
-#include <linux/types.h>
-#include <asm/vm-flags.h>
-
-/*
- * These are used to make use of C type-checking..
- */
-
-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-
-#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-
-#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
-
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
-
-#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
-#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
-#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
-#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
- smp_wmb(); \
- (to).pte_low = (from).pte_low; })
-#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
-#define pte_set_val(pte, phys, prot) \
- ({ (pte).pte_high = (phys) >> 32; \
- (pte).pte_low = (phys) | pgprot_val(prot); })
-
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) } )
-
-typedef unsigned long long pfn_t;
-typedef unsigned long long phys_t;
-
-#else
-
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pgd; } pgd_t;
-
-#ifdef CONFIG_3_LEVEL_PGTABLES
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) } )
-#endif
-
-#define pte_val(x) ((x).pte)
-
-
-#define pte_get_bits(p, bits) ((p).pte & (bits))
-#define pte_set_bits(p, bits) ((p).pte |= (bits))
-#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
-#define pte_copy(to, from) ((to).pte = (from).pte)
-#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
-#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
-
-typedef unsigned long pfn_t;
-typedef unsigned long phys_t;
-
-#endif
-
-typedef struct { unsigned long pgprot; } pgprot_t;
-
-typedef struct page *pgtable_t;
-
-#define pgd_val(x) ((x).pgd)
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-extern unsigned long uml_physmem;
-
-#define PAGE_OFFSET (uml_physmem)
-#define KERNELBASE PAGE_OFFSET
-
-#define __va_space (8*1024*1024)
-
-#include <mem.h>
-
-/* Cast to unsigned long before casting to void * to avoid a warning from
- * mmap_kmem about cutting a long long down to a void *. Not sure that
- * casting is the right thing, but 32-bit UML can't have 64-bit virtual
- * addresses
- */
-#define __pa(virt) to_phys((void *) (unsigned long) (virt))
-#define __va(phys) to_virt((unsigned long) (phys))
-
-#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
-#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
-
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
-
-#include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
-
-#endif /* __ASSEMBLY__ */
-#endif /* __UM_PAGE_H */
--- a/arch/um/include/asm/pgalloc.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Copyright 2003 PathScale, Inc.
- * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGALLOC_H
-#define __UM_PGALLOC_H
-
-#include <linux/mm.h>
-
-#define pmd_populate_kernel(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
-
-#define pmd_populate(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE + \
- ((unsigned long long)page_to_pfn(pte) << \
- (unsigned long long) PAGE_SHIFT)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-/*
- * Allocate and free page tables.
- */
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
-}
-
-#define __pte_free_tlb(tlb,pte, address) \
-do { \
- pgtable_page_dtor(pte); \
- tlb_remove_page((tlb),(pte)); \
-} while (0)
-
-#ifdef CONFIG_3_LEVEL_PGTABLES
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
- free_page((unsigned long)pmd);
-}
-
-#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
-#endif
-
-#define check_pgt_cache() do { } while (0)
-
-#endif
-
--- a/arch/um/include/asm/pgtable-2level.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Copyright 2003 PathScale, Inc.
- * Derived from include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGTABLE_2LEVEL_H
-#define __UM_PGTABLE_2LEVEL_H
-
-#include <asm-generic/pgtable-nopmd.h>
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-
-#define PGDIR_SHIFT 22
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-/*
- * entries per page directory level: the i386 is two-level, so
- * we don't really have any PMD directory physically.
- */
-#define PTRS_PER_PTE 1024
-#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define PTRS_PER_PGD 1024
-#define FIRST_USER_ADDRESS 0
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
- pte_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
- pgd_val(e))
-
-static inline int pgd_newpage(pgd_t pgd) { return 0; }
-static inline void pgd_mkuptodate(pgd_t pgd) { }
-
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-
-#define pte_pfn(x) phys_to_pfn(pte_val(x))
-#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
-
-/*
- * Bits 0 through 4 are taken
- */
-#define PTE_FILE_MAX_BITS 27
-
-#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
-
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
-
-#endif
--- a/arch/um/include/asm/pgtable-3level.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright 2003 PathScale Inc
- * Derived from include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGTABLE_3LEVEL_H
-#define __UM_PGTABLE_3LEVEL_H
-
-#include <asm-generic/pgtable-nopud.h>
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-
-#ifdef CONFIG_64BIT
-#define PGDIR_SHIFT 30
-#else
-#define PGDIR_SHIFT 31
-#endif
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-/* PMD_SHIFT determines the size of the area a second-level page table can
- * map
- */
-
-#define PMD_SHIFT 21
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/*
- * entries per page directory level
- */
-
-#define PTRS_PER_PTE 512
-#ifdef CONFIG_64BIT
-#define PTRS_PER_PMD 512
-#define PTRS_PER_PGD 512
-#else
-#define PTRS_PER_PMD 1024
-#define PTRS_PER_PGD 1024
-#endif
-
-#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- pte_val(e))
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- pmd_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
- pgd_val(e))
-
-#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
-#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
-#define pud_populate(mm, pud, pmd) \
- set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
-
-#ifdef CONFIG_64BIT
-#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
-#else
-#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
-#endif
-
-static inline int pgd_newpage(pgd_t pgd)
-{
- return(pgd_val(pgd) & _PAGE_NEWPAGE);
-}
-
-static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
-
-#ifdef CONFIG_64BIT
-#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
-#else
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-#endif
-
-struct mm_struct;
-extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
-
-static inline void pud_clear (pud_t *pud)
-{
- set_pud(pud, __pud(_PAGE_NEWPAGE));
-}
-
-#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
- pmd_index(address))
-
-static inline unsigned long pte_pfn(pte_t pte)
-{
- return phys_to_pfn(pte_val(pte));
-}
-
-static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
-{
- pte_t pte;
- phys_t phys = pfn_to_phys(page_nr);
-
- pte_set_val(pte, phys, pgprot);
- return pte;
-}
-
-static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
-{
- return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
-}
-
-/*
- * Bits 0 through 3 are taken in the low part of the pte,
- * put the 32 bits of offset into the high part.
- */
-#define PTE_FILE_MAX_BITS 32
-
-#ifdef CONFIG_64BIT
-
-#define pte_to_pgoff(p) ((p).pte >> 32)
-
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
-
-#else
-
-#define pte_to_pgoff(pte) ((pte).pte_high)
-
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
-
-#endif
-
-#endif
-
--- a/arch/um/include/asm/pgtable.h
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Copyright 2003 PathScale, Inc.
- * Derived from include/asm-i386/pgtable.h
- * Licensed under the GPL
- */
-
-#ifndef __UM_PGTABLE_H
-#define __UM_PGTABLE_H
-
-#include <asm/fixmap.h>
-
-#define _PAGE_PRESENT 0x001
-#define _PAGE_NEWPAGE 0x002
-#define _PAGE_NEWPROT 0x004
-#define _PAGE_RW 0x020
-#define _PAGE_USER 0x040
-#define _PAGE_ACCESSED 0x080
-#define _PAGE_DIRTY 0x100
-/* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
- pte_present gives true */
-
-#ifdef CONFIG_3_LEVEL_PGTABLES
-#include <asm/pgtable-3level.h>
-#else
-#include <asm/pgtable-2level.h>
-#endif
-
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-/* zero page used for uninitialized stuff */
-extern unsigned long *empty_zero_page;
-
-#define pgtable_cache_init() do ; while (0)
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-
-extern unsigned long end_iomem;
-
-#define VMALLOC_OFFSET (__va_space)
-#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
-#else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-#endif
-#define MODULES_VADDR VMALLOC_START
-#define MODULES_END VMALLOC_END
-#define MODULES_LEN (MODULES_VADDR - MODULES_END)
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define __PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-
-/*
- * The i386 can't do page protection for execute, and considers that the same
- * are read.
- * Also, write permissions imply read permissions. This is the closest we can
- * get..
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-
-#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
-
-#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
-
-#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
-#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
-
-#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
-#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
-
-#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
-
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-/*
- * =================================
- * Flags checking section.
- * =================================
- */
-
-static inline int pte_none(pte_t pte)
-{
- return pte_is_zero(pte);
-}
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_read(pte_t pte)
-{
- return((pte_get_bits(pte, _PAGE_USER)) &&
- !(pte_get_bits(pte, _PAGE_PROTNONE)));
-}
-
-static inline int pte_exec(pte_t pte){
- return((pte_get_bits(pte, _PAGE_USER)) &&
- !(pte_get_bits(pte, _PAGE_PROTNONE)));
-}
-
-static inline int pte_write(pte_t pte)
-{
- return((pte_get_bits(pte, _PAGE_RW)) &&
- !(pte_get_bits(pte, _PAGE_PROTNONE)));
-}
-
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte)
-{
- return pte_get_bits(pte, _PAGE_FILE);
-}
-
-static inline int pte_dirty(pte_t pte)
-{
- return pte_get_bits(pte, _PAGE_DIRTY);
-}
-
-static inline int pte_young(pte_t pte)
-{
- return pte_get_bits(pte, _PAGE_ACCESSED);
-}
-
-static inline int pte_newpage(pte_t pte)
-{
- return pte_get_bits(pte, _PAGE_NEWPAGE);
-}
-
-static inline int pte_newprot(pte_t pte)
-{
- return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
-}
-
-static inline int pte_special(pte_t pte)
-{
- return 0;
-}
-
-/*
- * =================================
- * Flags setting section.
- * =================================
- */
-
-static inline pte_t pte_mknewprot(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_NEWPROT);
- return(pte);
-}
-
-static inline pte_t pte_mkclean(pte_t pte)
-{
- pte_clear_bits(pte, _PAGE_DIRTY);
- return(pte);
-}
-
-static inline pte_t pte_mkold(pte_t pte)
-{
- pte_clear_bits(pte, _PAGE_ACCESSED);
- return(pte);
-}
-
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- pte_clear_bits(pte, _PAGE_RW);
- return(pte_mknewprot(pte));
-}
-
-static inline pte_t pte_mkread(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_USER);
- return(pte_mknewprot(pte));
-}
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_DIRTY);
- return(pte);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_ACCESSED);
- return(pte);
-}
-
-static inline pte_t pte_mkwrite(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_RW);
- return(pte_mknewprot(pte));
-}
-
-static inline pte_t pte_mkuptodate(pte_t pte)
-{
- pte_clear_bits(pte, _PAGE_NEWPAGE);
- if(pte_present(pte))
- pte_clear_bits(pte, _PAGE_NEWPROT);
- return(pte);
-}
-
-static inline pte_t pte_mknewpage(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_NEWPAGE);
- return(pte);
-}
-
-static inline pte_t pte_mkspecial(pte_t pte)
-{
- return(pte);
-}
-
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
-{
- pte_copy(*pteptr, pteval);
-
- /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
- * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
- * mapped pages.
- */
-
- *pteptr = pte_mknewpage(*pteptr);
- if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t pte_a, pte_t pte_b)
-{
- return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
-}
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
-#define __virt_to_page(virt) phys_to_page(__pa(virt))
-#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
-#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
-
-#define mk_pte(page, pgprot) \
- ({ pte_t pte; \
- \
- pte_set_val(pte, page_to_phys(page), (pgprot)); \
- if (pte_present(pte)) \
- pte_mknewprot(pte_mknewpage(pte)); \
- pte;})
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
- return pte;
-}
-
-/*
- * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_unmap(pte) do { } while (0)
-
-struct mm_struct;
-extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
-
-#define update_mmu_cache(vma,address,ptep) do ; while (0)
-
-/* Encode and de-code a swap entry */
-#define __swp_type(x) (((x).val >> 5) & 0x1f)
-#define __swp_offset(x) ((x).val >> 11)
-
-#define __swp_entry(type, offset) \
- ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
-#define __pte_to_swp_entry(pte) \
- ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#define kern_addr_valid(addr) (1)
-
-#include <asm-generic/pgtable.h>
-
-/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
- pte_clear(&init_mm, (vaddr), (ptep)); \
- __flush_tlb_one((vaddr)); \
-} while (0)
-
-#endif
--- a/arch/um/include/asm/processor-generic.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PROCESSOR_GENERIC_H
-#define __UM_PROCESSOR_GENERIC_H
-
-struct pt_regs;
-
-struct task_struct;
-
-#include <asm/ptrace.h>
-#include <registers.h>
-#include <sysdep/archsetjmp.h>
-
-#include <linux/prefetch.h>
-
-struct mm_struct;
-
-struct thread_struct {
- struct pt_regs regs;
- struct pt_regs *segv_regs;
- int singlestep_syscall;
- void *fault_addr;
- jmp_buf *fault_catcher;
- struct task_struct *prev_sched;
- struct arch_thread arch;
- jmp_buf switch_buf;
- struct {
- int op;
- union {
- struct {
- int pid;
- } fork, exec;
- struct {
- int (*proc)(void *);
- void *arg;
- } thread;
- struct {
- void (*proc)(void *);
- void *arg;
- } cb;
- } u;
- } request;
-};
-
-#define INIT_THREAD \
-{ \
- .regs = EMPTY_REGS, \
- .fault_addr = NULL, \
- .prev_sched = NULL, \
- .arch = INIT_ARCH_THREAD, \
- .request = { 0 } \
-}
-
-static inline void release_thread(struct task_struct *task)
-{
-}
-
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
-static inline void mm_copy_segments(struct mm_struct *from_mm,
- struct mm_struct *new_mm)
-{
-}
-
-#define init_stack (init_thread_union.stack)
-
-/*
- * User space process size: 3GB (default).
- */
-extern unsigned long task_size;
-
-#define TASK_SIZE (task_size)
-
-#undef STACK_TOP
-#undef STACK_TOP_MAX
-
-extern unsigned long stacksizelim;
-
-#define STACK_ROOM (stacksizelim)
-#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
-#define STACK_TOP_MAX STACK_TOP
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE (0x40000000)
-
-extern void start_thread(struct pt_regs *regs, unsigned long entry,
- unsigned long stack);
-
-struct cpuinfo_um {
- unsigned long loops_per_jiffy;
- int ipi_pipe[2];
-};
-
-extern struct cpuinfo_um boot_cpu_data;
-
-#define my_cpu_data cpu_data[smp_processor_id()]
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_um cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-
-#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
-extern unsigned long get_wchan(struct task_struct *p);
-
-#endif
--- a/arch/um/include/asm/ptrace-generic.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PTRACE_GENERIC_H
-#define __UM_PTRACE_GENERIC_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/ptrace-abi.h>
-#include <sysdep/ptrace.h>
-
-struct pt_regs {
- struct uml_pt_regs regs;
-};
-
-#define arch_has_single_step() (1)
-
-#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
-
-#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
-#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
-
-#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
-
-#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
-
-#define instruction_pointer(regs) PT_REGS_IP(regs)
-
-struct task_struct;
-
-extern long subarch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data);
-extern unsigned long getreg(struct task_struct *child, int regno);
-extern int putreg(struct task_struct *child, int regno, unsigned long value);
-
-extern int arch_copy_tls(struct task_struct *new);
-extern void clear_flushed_tls(struct task_struct *task);
-extern void syscall_trace_enter(struct pt_regs *regs);
-extern void syscall_trace_leave(struct pt_regs *regs);
-
-#endif
-
-#endif
--- a/arch/um/include/asm/setup.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef SETUP_H_INCLUDED
-#define SETUP_H_INCLUDED
-
-/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
- * command line, so this choice is ok.
- */
-
-#define COMMAND_LINE_SIZE 4096
-
-#endif /* SETUP_H_INCLUDED */
--- a/arch/um/include/asm/smp.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __UM_SMP_H
-#define __UM_SMP_H
-
-#ifdef CONFIG_SMP
-
-#include <linux/bitops.h>
-#include <asm/current.h>
-#include <linux/cpumask.h>
-
-#define raw_smp_processor_id() (current_thread->cpu)
-
-#define cpu_logical_map(n) (n)
-#define cpu_number_map(n) (n)
-extern int hard_smp_processor_id(void);
-#define NO_PROC_ID -1
-
-extern int ncpus;
-
-
-static inline void smp_cpus_done(unsigned int maxcpus)
-{
-}
-
-extern struct task_struct *idle_threads[NR_CPUS];
-
-#else
-
-#define hard_smp_processor_id() 0
-
-#endif
-
-#endif
--- a/arch/um/include/asm/sysrq.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __UM_SYSRQ_H
-#define __UM_SYSRQ_H
-
-struct task_struct;
-extern void show_trace(struct task_struct* task, unsigned long *stack);
-
-#endif
--- a/arch/um/include/asm/thread_info.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_THREAD_INFO_H
-#define __UM_THREAD_INFO_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/types.h>
-#include <asm/page.h>
-#include <asm/uaccess.h>
-
-struct thread_info {
- struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
- unsigned long flags; /* low level flags */
- __u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
- mm_segment_t addr_limit; /* thread address space:
- 0-0xBFFFFFFF for user
- 0-0xFFFFFFFF for kernel */
- struct restart_block restart_block;
- struct thread_info *real_thread; /* Points to non-IRQ stack */
-};
-
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .task = &tsk, \
- .exec_domain = &default_exec_domain, \
- .flags = 0, \
- .cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
- .real_thread = NULL, \
-}
-
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
- struct thread_info *ti;
- unsigned long mask = THREAD_SIZE - 1;
- void *p;
-
- asm volatile ("" : "=r" (p) : "0" (&ti));
- ti = (struct thread_info *) (((unsigned long)p) & ~mask);
- return ti;
-}
-
-#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
-
-#endif
-
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_SIGPENDING 1 /* signal pending */
-#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_RESTART_BLOCK 4
-#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
-#define TIF_SYSCALL_AUDIT 6
-#define TIF_RESTORE_SIGMASK 7
-#define TIF_NOTIFY_RESUME 8
-
-#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
-#define _TIF_MEMDIE (1 << TIF_MEMDIE)
-#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-
-#endif
--- a/arch/um/include/asm/timex.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __UM_TIMEX_H
-#define __UM_TIMEX_H
-
-typedef unsigned long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
- return 0;
-}
-
-#define CLOCK_TICK_RATE (HZ)
-
-#endif
--- a/arch/um/include/asm/tlb.h
+++ /dev/null
@@ -1,122 +0,0 @@
-#ifndef __UM_TLB_H
-#define __UM_TLB_H
-
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/percpu.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
- */
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int need_flush; /* Really unmapped some ptes? */
- unsigned long start;
- unsigned long end;
- unsigned int fullmm; /* non-zero means full mm flush */
-};
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long address)
-{
- if (tlb->start > address)
- tlb->start = address;
- if (tlb->end < address + PAGE_SIZE)
- tlb->end = address + PAGE_SIZE;
-}
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
- tlb->need_flush = 0;
-
- tlb->start = TASK_SIZE;
- tlb->end = 0;
-
- if (tlb->fullmm) {
- tlb->start = 0;
- tlb->end = TASK_SIZE;
- }
-}
-
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->start = start;
- tlb->end = end;
- tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
-}
-
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
- if (!tlb->need_flush)
- return;
-
- flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
- init_tlb_gather(tlb);
-}
-
-/* tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-/* tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- * while handling the additional races in SMP caused by other CPUs
- * caching valid mappings in their TLBs.
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->need_flush = 1;
- free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- __tlb_remove_page(tlb, page);
-}
-
-/**
- * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate. This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
- */
-#define tlb_remove_tlb_entry(tlb, ptep, address) \
- do { \
- tlb->need_flush = 1; \
- __tlb_remove_tlb_entry(tlb, ptep, address); \
- } while (0)
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-
-#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
-
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-
-#define tlb_migrate_finish(mm) do {} while (0)
-
-#endif
--- a/arch/um/include/asm/tlbflush.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_TLBFLUSH_H
-#define __UM_TLBFLUSH_H
-
-#include <linux/mm.h>
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_kernel_vm() flushes the kernel vm area
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- */
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
-extern void flush_tlb_kernel_vm(void);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void __flush_tlb_one(unsigned long addr);
-
-#endif
--- a/arch/um/include/asm/uaccess.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_UACCESS_H
-#define __UM_UACCESS_H
-
-/* thread_info has a mm_segment_t in it, so put the definition up here */
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-#include <linux/thread_info.h>
-#include <linux/errno.h>
-#include <asm/processor.h>
-#include <asm/elf.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
-#define USER_DS MAKE_MM_SEG(TASK_SIZE)
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
-#define __under_task_size(addr, size) \
- (((unsigned long) (addr) < TASK_SIZE) && \
- (((unsigned long) (addr) + (size)) < TASK_SIZE))
-
-#define __access_ok_vsyscall(type, addr, size) \
- ((type == VERIFY_READ) && \
- ((unsigned long) (addr) >= FIXADDR_USER_START) && \
- ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
- ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
-
-#define __addr_range_nowrap(addr, size) \
- ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
-
-#define access_ok(type, addr, size) \
- (__addr_range_nowrap(addr, size) && \
- (__under_task_size(addr, size) || \
- __access_ok_vsyscall(type, addr, size) || \
- segment_eq(get_fs(), KERNEL_DS)))
-
-extern int copy_from_user(void *to, const void __user *from, int n);
-extern int copy_to_user(void __user *to, const void *from, int n);
-
-/*
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-
-extern int strncpy_from_user(char *dst, const char __user *src, int count);
-
-/*
- * __clear_user: - Zero a block of memory in user space, with less checking.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-extern int __clear_user(void __user *mem, int len);
-
-/*
- * clear_user: - Zero a block of memory in user space.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in user space.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-extern int clear_user(void __user *mem, int len);
-
-/*
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- * @n: The maximum valid length
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-extern int strnlen_user(const void __user *str, int len);
-
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-#define __get_user(x, ptr) \
-({ \
- const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
- __typeof__(x) __private_val; \
- int __private_ret = -EFAULT; \
- (x) = (__typeof__(*(__private_ptr)))0; \
- if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
- sizeof(*(__private_ptr))) == 0) { \
- (x) = (__typeof__(*(__private_ptr))) __private_val; \
- __private_ret = 0; \
- } \
- __private_ret; \
-})
-
-#define get_user(x, ptr) \
-({ \
- const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
- (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
- __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
-})
-
-#define __put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __user *__private_ptr = ptr; \
- __typeof__(*(__private_ptr)) __private_val; \
- int __private_ret = -EFAULT; \
- __private_val = (__typeof__(*(__private_ptr))) (x); \
1934 - if (__copy_to_user((__private_ptr), &__private_val, \
1935 - sizeof(*(__private_ptr))) == 0) { \
1936 - __private_ret = 0; \
1937 - } \
1938 - __private_ret; \
1939 -})
1940 -
1941 -#define put_user(x, ptr) \
1942 -({ \
1943 - __typeof__(*(ptr)) __user *private_ptr = (ptr); \
1944 - (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
1945 - __put_user(x, private_ptr) : -EFAULT); \
1946 -})
1947 -
1948 -#define strlen_user(str) strnlen_user(str, ~0U >> 1)
1949 -
1950 -struct exception_table_entry
1951 -{
1952 - unsigned long insn;
1953 - unsigned long fixup;
1954 -};
1955 -
1956 -#endif
1957 --- /dev/null
1958 +++ b/arch/um/include/uapi/asm/Kbuild
1959 @@ -0,0 +1,8 @@
1960 +generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
1961 +generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
1962 +generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
1963 +generic-y += switch_to.h clkdev.h
1964 +generic-y += trace_clock.h
1965 +generic-y += preempt.h
1966 +generic-y += hash.h
1967 +generic-y += barrier.h
1968 --- /dev/null
1969 +++ b/arch/um/include/uapi/asm/a.out-core.h
1970 @@ -0,0 +1,27 @@
1971 +/* a.out coredump register dumper
1972 + *
1973 + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
1974 + * Written by David Howells (dhowells@redhat.com)
1975 + *
1976 + * This program is free software; you can redistribute it and/or
1977 + * modify it under the terms of the GNU General Public Licence
1978 + * as published by the Free Software Foundation; either version
1979 + * 2 of the Licence, or (at your option) any later version.
1980 + */
1981 +
1982 +#ifndef __UM_A_OUT_CORE_H
1983 +#define __UM_A_OUT_CORE_H
1984 +
1985 +#ifdef __KERNEL__
1986 +
1987 +#include <linux/user.h>
1988 +
1989 +/*
1990 + * fill in the user structure for an a.out core dump
1991 + */
1992 +static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
1993 +{
1994 +}
1995 +
1996 +#endif /* __KERNEL__ */
1997 +#endif /* __UM_A_OUT_CORE_H */
1998 --- /dev/null
1999 +++ b/arch/um/include/uapi/asm/bugs.h
2000 @@ -0,0 +1,6 @@
2001 +#ifndef __UM_BUGS_H
2002 +#define __UM_BUGS_H
2003 +
2004 +void check_bugs(void);
2005 +
2006 +#endif
2007 --- /dev/null
2008 +++ b/arch/um/include/uapi/asm/cache.h
2009 @@ -0,0 +1,17 @@
2010 +#ifndef __UM_CACHE_H
2011 +#define __UM_CACHE_H
2012 +
2013 +
2014 +#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
2015 +# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
2016 +#elif defined(CONFIG_UML_X86) /* 64-bit */
2017 +# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
2018 +#else
2019 +/* XXX: this was taken from x86, now it's completely random. Luckily only
2020 + * affects SMP padding. */
2021 +# define L1_CACHE_SHIFT 5
2022 +#endif
2023 +
2024 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2025 +
2026 +#endif
2027 --- /dev/null
2028 +++ b/arch/um/include/uapi/asm/common.lds.S
2029 @@ -0,0 +1,107 @@
2030 +#include <asm-generic/vmlinux.lds.h>
2031 +
2032 + .fini : { *(.fini) } =0x9090
2033 + _etext = .;
2034 + PROVIDE (etext = .);
2035 +
2036 + . = ALIGN(4096);
2037 + _sdata = .;
2038 + PROVIDE (sdata = .);
2039 +
2040 + RODATA
2041 +
2042 + .unprotected : { *(.unprotected) }
2043 + . = ALIGN(4096);
2044 + PROVIDE (_unprotected_end = .);
2045 +
2046 + . = ALIGN(4096);
2047 + .note : { *(.note.*) }
2048 + EXCEPTION_TABLE(0)
2049 +
2050 + BUG_TABLE
2051 +
2052 + .uml.setup.init : {
2053 + __uml_setup_start = .;
2054 + *(.uml.setup.init)
2055 + __uml_setup_end = .;
2056 + }
2057 +
2058 + .uml.help.init : {
2059 + __uml_help_start = .;
2060 + *(.uml.help.init)
2061 + __uml_help_end = .;
2062 + }
2063 +
2064 + .uml.postsetup.init : {
2065 + __uml_postsetup_start = .;
2066 + *(.uml.postsetup.init)
2067 + __uml_postsetup_end = .;
2068 + }
2069 +
2070 + .init.setup : {
2071 + INIT_SETUP(0)
2072 + }
2073 +
2074 + PERCPU_SECTION(32)
2075 +
2076 + .initcall.init : {
2077 + INIT_CALLS
2078 + }
2079 +
2080 + .con_initcall.init : {
2081 + CON_INITCALL
2082 + }
2083 +
2084 + .uml.initcall.init : {
2085 + __uml_initcall_start = .;
2086 + *(.uml.initcall.init)
2087 + __uml_initcall_end = .;
2088 + }
2089 +
2090 + SECURITY_INIT
2091 +
2092 + .exitcall : {
2093 + __exitcall_begin = .;
2094 + *(.exitcall.exit)
2095 + __exitcall_end = .;
2096 + }
2097 +
2098 + .uml.exitcall : {
2099 + __uml_exitcall_begin = .;
2100 + *(.uml.exitcall.exit)
2101 + __uml_exitcall_end = .;
2102 + }
2103 +
2104 + . = ALIGN(4);
2105 + .altinstructions : {
2106 + __alt_instructions = .;
2107 + *(.altinstructions)
2108 + __alt_instructions_end = .;
2109 + }
2110 + .altinstr_replacement : { *(.altinstr_replacement) }
2111 + /* .exit.text is discard at runtime, not link time, to deal with references
2112 + from .altinstructions and .eh_frame */
2113 + .exit.text : { *(.exit.text) }
2114 + .exit.data : { *(.exit.data) }
2115 +
2116 + .preinit_array : {
2117 + __preinit_array_start = .;
2118 + *(.preinit_array)
2119 + __preinit_array_end = .;
2120 + }
2121 + .init_array : {
2122 + __init_array_start = .;
2123 + *(.init_array)
2124 + __init_array_end = .;
2125 + }
2126 + .fini_array : {
2127 + __fini_array_start = .;
2128 + *(.fini_array)
2129 + __fini_array_end = .;
2130 + }
2131 +
2132 + . = ALIGN(4096);
2133 + .init.ramfs : {
2134 + INIT_RAM_FS
2135 + }
2136 +
2137 --- /dev/null
2138 +++ b/arch/um/include/uapi/asm/dma.h
2139 @@ -0,0 +1,10 @@
2140 +#ifndef __UM_DMA_H
2141 +#define __UM_DMA_H
2142 +
2143 +#include <asm/io.h>
2144 +
2145 +extern unsigned long uml_physmem;
2146 +
2147 +#define MAX_DMA_ADDRESS (uml_physmem)
2148 +
2149 +#endif
2150 --- /dev/null
2151 +++ b/arch/um/include/uapi/asm/fixmap.h
2152 @@ -0,0 +1,60 @@
2153 +#ifndef __UM_FIXMAP_H
2154 +#define __UM_FIXMAP_H
2155 +
2156 +#include <asm/processor.h>
2157 +#include <asm/kmap_types.h>
2158 +#include <asm/archparam.h>
2159 +#include <asm/page.h>
2160 +#include <linux/threads.h>
2161 +
2162 +/*
2163 + * Here we define all the compile-time 'special' virtual
2164 + * addresses. The point is to have a constant address at
2165 + * compile time, but to set the physical address only
2166 + * in the boot process. We allocate these special addresses
2167 + * from the end of virtual memory (0xfffff000) backwards.
2168 + * Also this lets us do fail-safe vmalloc(), we
2169 + * can guarantee that these special addresses and
2170 + * vmalloc()-ed addresses never overlap.
2171 + *
2172 + * these 'compile-time allocated' memory buffers are
2173 + * fixed-size 4k pages. (or larger if used with an increment
2174 + * highger than 1) use fixmap_set(idx,phys) to associate
2175 + * physical memory with fixmap indices.
2176 + *
2177 + * TLB entries of such buffers will not be flushed across
2178 + * task switches.
2179 + */
2180 +
2181 +/*
2182 + * on UP currently we will have no trace of the fixmap mechanizm,
2183 + * no page table allocations, etc. This might change in the
2184 + * future, say framebuffers for the console driver(s) could be
2185 + * fix-mapped?
2186 + */
2187 +enum fixed_addresses {
2188 +#ifdef CONFIG_HIGHMEM
2189 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
2190 + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
2191 +#endif
2192 + __end_of_fixed_addresses
2193 +};
2194 +
2195 +extern void __set_fixmap (enum fixed_addresses idx,
2196 + unsigned long phys, pgprot_t flags);
2197 +
2198 +/*
2199 + * used by vmalloc.c.
2200 + *
2201 + * Leave one empty page between vmalloc'ed areas and
2202 + * the start of the fixmap, and leave one page empty
2203 + * at the top of mem..
2204 + */
2205 +
2206 +#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
2207 +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
2208 +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
2209 +
2210 +#include <asm-generic/fixmap.h>
2211 +
2212 +#endif
2213 --- /dev/null
2214 +++ b/arch/um/include/uapi/asm/irq.h
2215 @@ -0,0 +1,23 @@
2216 +#ifndef __UM_IRQ_H
2217 +#define __UM_IRQ_H
2218 +
2219 +#define TIMER_IRQ 0
2220 +#define UMN_IRQ 1
2221 +#define CONSOLE_IRQ 2
2222 +#define CONSOLE_WRITE_IRQ 3
2223 +#define UBD_IRQ 4
2224 +#define UM_ETH_IRQ 5
2225 +#define SSL_IRQ 6
2226 +#define SSL_WRITE_IRQ 7
2227 +#define ACCEPT_IRQ 8
2228 +#define MCONSOLE_IRQ 9
2229 +#define WINCH_IRQ 10
2230 +#define SIGIO_WRITE_IRQ 11
2231 +#define TELNETD_IRQ 12
2232 +#define XTERM_IRQ 13
2233 +#define RANDOM_IRQ 14
2234 +
2235 +#define LAST_IRQ RANDOM_IRQ
2236 +#define NR_IRQS (LAST_IRQ + 1)
2237 +
2238 +#endif
2239 --- /dev/null
2240 +++ b/arch/um/include/uapi/asm/irqflags.h
2241 @@ -0,0 +1,42 @@
2242 +#ifndef __UM_IRQFLAGS_H
2243 +#define __UM_IRQFLAGS_H
2244 +
2245 +extern int get_signals(void);
2246 +extern int set_signals(int enable);
2247 +extern void block_signals(void);
2248 +extern void unblock_signals(void);
2249 +
2250 +static inline unsigned long arch_local_save_flags(void)
2251 +{
2252 + return get_signals();
2253 +}
2254 +
2255 +static inline void arch_local_irq_restore(unsigned long flags)
2256 +{
2257 + set_signals(flags);
2258 +}
2259 +
2260 +static inline void arch_local_irq_enable(void)
2261 +{
2262 + unblock_signals();
2263 +}
2264 +
2265 +static inline void arch_local_irq_disable(void)
2266 +{
2267 + block_signals();
2268 +}
2269 +
2270 +static inline unsigned long arch_local_irq_save(void)
2271 +{
2272 + unsigned long flags;
2273 + flags = arch_local_save_flags();
2274 + arch_local_irq_disable();
2275 + return flags;
2276 +}
2277 +
2278 +static inline bool arch_irqs_disabled(void)
2279 +{
2280 + return arch_local_save_flags() == 0;
2281 +}
2282 +
2283 +#endif
2284 --- /dev/null
2285 +++ b/arch/um/include/uapi/asm/kmap_types.h
2286 @@ -0,0 +1,13 @@
2287 +/*
2288 + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
2289 + * Licensed under the GPL
2290 + */
2291 +
2292 +#ifndef __UM_KMAP_TYPES_H
2293 +#define __UM_KMAP_TYPES_H
2294 +
2295 +/* No more #include "asm/arch/kmap_types.h" ! */
2296 +
2297 +#define KM_TYPE_NR 14
2298 +
2299 +#endif
2300 --- /dev/null
2301 +++ b/arch/um/include/uapi/asm/kvm_para.h
2302 @@ -0,0 +1 @@
2303 +#include <asm-generic/kvm_para.h>
2304 --- /dev/null
2305 +++ b/arch/um/include/uapi/asm/mmu.h
2306 @@ -0,0 +1,24 @@
2307 +/*
2308 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2309 + * Licensed under the GPL
2310 + */
2311 +
2312 +#ifndef __ARCH_UM_MMU_H
2313 +#define __ARCH_UM_MMU_H
2314 +
2315 +#include <mm_id.h>
2316 +#include <asm/mm_context.h>
2317 +
2318 +typedef struct mm_context {
2319 + struct mm_id id;
2320 + struct uml_arch_mm_context arch;
2321 + struct page *stub_pages[2];
2322 +} mm_context_t;
2323 +
2324 +extern void __switch_mm(struct mm_id * mm_idp);
2325 +
2326 +/* Avoid tangled inclusion with asm/ldt.h */
2327 +extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
2328 +extern void free_ldt(struct mm_context *mm);
2329 +
2330 +#endif
2331 --- /dev/null
2332 +++ b/arch/um/include/uapi/asm/mmu_context.h
2333 @@ -0,0 +1,58 @@
2334 +/*
2335 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2336 + * Licensed under the GPL
2337 + */
2338 +
2339 +#ifndef __UM_MMU_CONTEXT_H
2340 +#define __UM_MMU_CONTEXT_H
2341 +
2342 +#include <linux/sched.h>
2343 +#include <asm/mmu.h>
2344 +
2345 +extern void uml_setup_stubs(struct mm_struct *mm);
2346 +extern void arch_exit_mmap(struct mm_struct *mm);
2347 +
2348 +#define deactivate_mm(tsk,mm) do { } while (0)
2349 +
2350 +extern void force_flush_all(void);
2351 +
2352 +static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
2353 +{
2354 + /*
2355 + * This is called by fs/exec.c and sys_unshare()
2356 + * when the new ->mm is used for the first time.
2357 + */
2358 + __switch_mm(&new->context.id);
2359 + down_write(&new->mmap_sem);
2360 + uml_setup_stubs(new);
2361 + up_write(&new->mmap_sem);
2362 +}
2363 +
2364 +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
2365 + struct task_struct *tsk)
2366 +{
2367 + unsigned cpu = smp_processor_id();
2368 +
2369 + if(prev != next){
2370 + cpumask_clear_cpu(cpu, mm_cpumask(prev));
2371 + cpumask_set_cpu(cpu, mm_cpumask(next));
2372 + if(next != &init_mm)
2373 + __switch_mm(&next->context.id);
2374 + }
2375 +}
2376 +
2377 +static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
2378 +{
2379 + uml_setup_stubs(mm);
2380 +}
2381 +
2382 +static inline void enter_lazy_tlb(struct mm_struct *mm,
2383 + struct task_struct *tsk)
2384 +{
2385 +}
2386 +
2387 +extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
2388 +
2389 +extern void destroy_context(struct mm_struct *mm);
2390 +
2391 +#endif
2392 --- /dev/null
2393 +++ b/arch/um/include/uapi/asm/page.h
2394 @@ -0,0 +1,122 @@
2395 +/*
2396 + * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
2397 + * Copyright 2003 PathScale, Inc.
2398 + * Licensed under the GPL
2399 + */
2400 +
2401 +#ifndef __UM_PAGE_H
2402 +#define __UM_PAGE_H
2403 +
2404 +#include <linux/const.h>
2405 +
2406 +/* PAGE_SHIFT determines the page size */
2407 +#define PAGE_SHIFT 12
2408 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
2409 +#define PAGE_MASK (~(PAGE_SIZE-1))
2410 +
2411 +#ifndef __ASSEMBLY__
2412 +
2413 +struct page;
2414 +
2415 +#include <linux/types.h>
2416 +#include <asm/vm-flags.h>
2417 +
2418 +/*
2419 + * These are used to make use of C type-checking..
2420 + */
2421 +
2422 +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
2423 +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
2424 +
2425 +#define clear_user_page(page, vaddr, pg) clear_page(page)
2426 +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
2427 +
2428 +#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
2429 +
2430 +typedef struct { unsigned long pte_low, pte_high; } pte_t;
2431 +typedef struct { unsigned long pmd; } pmd_t;
2432 +typedef struct { unsigned long pgd; } pgd_t;
2433 +#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
2434 +
2435 +#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
2436 +#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
2437 +#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
2438 +#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
2439 + smp_wmb(); \
2440 + (to).pte_low = (from).pte_low; })
2441 +#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
2442 +#define pte_set_val(pte, phys, prot) \
2443 + ({ (pte).pte_high = (phys) >> 32; \
2444 + (pte).pte_low = (phys) | pgprot_val(prot); })
2445 +
2446 +#define pmd_val(x) ((x).pmd)
2447 +#define __pmd(x) ((pmd_t) { (x) } )
2448 +
2449 +typedef unsigned long long pfn_t;
2450 +typedef unsigned long long phys_t;
2451 +
2452 +#else
2453 +
2454 +typedef struct { unsigned long pte; } pte_t;
2455 +typedef struct { unsigned long pgd; } pgd_t;
2456 +
2457 +#ifdef CONFIG_3_LEVEL_PGTABLES
2458 +typedef struct { unsigned long pmd; } pmd_t;
2459 +#define pmd_val(x) ((x).pmd)
2460 +#define __pmd(x) ((pmd_t) { (x) } )
2461 +#endif
2462 +
2463 +#define pte_val(x) ((x).pte)
2464 +
2465 +
2466 +#define pte_get_bits(p, bits) ((p).pte & (bits))
2467 +#define pte_set_bits(p, bits) ((p).pte |= (bits))
2468 +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
2469 +#define pte_copy(to, from) ((to).pte = (from).pte)
2470 +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
2471 +#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
2472 +
2473 +typedef unsigned long pfn_t;
2474 +typedef unsigned long phys_t;
2475 +
2476 +#endif
2477 +
2478 +typedef struct { unsigned long pgprot; } pgprot_t;
2479 +
2480 +typedef struct page *pgtable_t;
2481 +
2482 +#define pgd_val(x) ((x).pgd)
2483 +#define pgprot_val(x) ((x).pgprot)
2484 +
2485 +#define __pte(x) ((pte_t) { (x) } )
2486 +#define __pgd(x) ((pgd_t) { (x) } )
2487 +#define __pgprot(x) ((pgprot_t) { (x) } )
2488 +
2489 +extern unsigned long uml_physmem;
2490 +
2491 +#define PAGE_OFFSET (uml_physmem)
2492 +#define KERNELBASE PAGE_OFFSET
2493 +
2494 +#define __va_space (8*1024*1024)
2495 +
2496 +#include <mem.h>
2497 +
2498 +/* Cast to unsigned long before casting to void * to avoid a warning from
2499 + * mmap_kmem about cutting a long long down to a void *. Not sure that
2500 + * casting is the right thing, but 32-bit UML can't have 64-bit virtual
2501 + * addresses
2502 + */
2503 +#define __pa(virt) to_phys((void *) (unsigned long) (virt))
2504 +#define __va(phys) to_virt((unsigned long) (phys))
2505 +
2506 +#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
2507 +#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
2508 +
2509 +#define pfn_valid(pfn) ((pfn) < max_mapnr)
2510 +#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
2511 +
2512 +#include <asm-generic/memory_model.h>
2513 +#include <asm-generic/getorder.h>
2514 +
2515 +#endif /* __ASSEMBLY__ */
2516 +#endif /* __UM_PAGE_H */
2517 --- /dev/null
2518 +++ b/arch/um/include/uapi/asm/pgalloc.h
2519 @@ -0,0 +1,61 @@
2520 +/*
2521 + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
2522 + * Copyright 2003 PathScale, Inc.
2523 + * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
2524 + * Licensed under the GPL
2525 + */
2526 +
2527 +#ifndef __UM_PGALLOC_H
2528 +#define __UM_PGALLOC_H
2529 +
2530 +#include <linux/mm.h>
2531 +
2532 +#define pmd_populate_kernel(mm, pmd, pte) \
2533 + set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
2534 +
2535 +#define pmd_populate(mm, pmd, pte) \
2536 + set_pmd(pmd, __pmd(_PAGE_TABLE + \
2537 + ((unsigned long long)page_to_pfn(pte) << \
2538 + (unsigned long long) PAGE_SHIFT)))
2539 +#define pmd_pgtable(pmd) pmd_page(pmd)
2540 +
2541 +/*
2542 + * Allocate and free page tables.
2543 + */
2544 +extern pgd_t *pgd_alloc(struct mm_struct *);
2545 +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
2546 +
2547 +extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
2548 +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
2549 +
2550 +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2551 +{
2552 + free_page((unsigned long) pte);
2553 +}
2554 +
2555 +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2556 +{
2557 + pgtable_page_dtor(pte);
2558 + __free_page(pte);
2559 +}
2560 +
2561 +#define __pte_free_tlb(tlb,pte, address) \
2562 +do { \
2563 + pgtable_page_dtor(pte); \
2564 + tlb_remove_page((tlb),(pte)); \
2565 +} while (0)
2566 +
2567 +#ifdef CONFIG_3_LEVEL_PGTABLES
2568 +
2569 +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
2570 +{
2571 + free_page((unsigned long)pmd);
2572 +}
2573 +
2574 +#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
2575 +#endif
2576 +
2577 +#define check_pgt_cache() do { } while (0)
2578 +
2579 +#endif
2580 +
2581 --- /dev/null
2582 +++ b/arch/um/include/uapi/asm/pgtable-2level.h
2583 @@ -0,0 +1,53 @@
2584 +/*
2585 + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
2586 + * Copyright 2003 PathScale, Inc.
2587 + * Derived from include/asm-i386/pgtable.h
2588 + * Licensed under the GPL
2589 + */
2590 +
2591 +#ifndef __UM_PGTABLE_2LEVEL_H
2592 +#define __UM_PGTABLE_2LEVEL_H
2593 +
2594 +#include <asm-generic/pgtable-nopmd.h>
2595 +
2596 +/* PGDIR_SHIFT determines what a third-level page table entry can map */
2597 +
2598 +#define PGDIR_SHIFT 22
2599 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
2600 +#define PGDIR_MASK (~(PGDIR_SIZE-1))
2601 +
2602 +/*
2603 + * entries per page directory level: the i386 is two-level, so
2604 + * we don't really have any PMD directory physically.
2605 + */
2606 +#define PTRS_PER_PTE 1024
2607 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
2608 +#define PTRS_PER_PGD 1024
2609 +#define FIRST_USER_ADDRESS 0
2610 +
2611 +#define pte_ERROR(e) \
2612 + printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
2613 + pte_val(e))
2614 +#define pgd_ERROR(e) \
2615 + printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
2616 + pgd_val(e))
2617 +
2618 +static inline int pgd_newpage(pgd_t pgd) { return 0; }
2619 +static inline void pgd_mkuptodate(pgd_t pgd) { }
2620 +
2621 +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
2622 +
2623 +#define pte_pfn(x) phys_to_pfn(pte_val(x))
2624 +#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
2625 +#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
2626 +
2627 +/*
2628 + * Bits 0 through 4 are taken
2629 + */
2630 +#define PTE_FILE_MAX_BITS 27
2631 +
2632 +#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
2633 +
2634 +#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
2635 +
2636 +#endif
2637 --- /dev/null
2638 +++ b/arch/um/include/uapi/asm/pgtable-3level.h
2639 @@ -0,0 +1,136 @@
2640 +/*
2641 + * Copyright 2003 PathScale Inc
2642 + * Derived from include/asm-i386/pgtable.h
2643 + * Licensed under the GPL
2644 + */
2645 +
2646 +#ifndef __UM_PGTABLE_3LEVEL_H
2647 +#define __UM_PGTABLE_3LEVEL_H
2648 +
2649 +#include <asm-generic/pgtable-nopud.h>
2650 +
2651 +/* PGDIR_SHIFT determines what a third-level page table entry can map */
2652 +
2653 +#ifdef CONFIG_64BIT
2654 +#define PGDIR_SHIFT 30
2655 +#else
2656 +#define PGDIR_SHIFT 31
2657 +#endif
2658 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
2659 +#define PGDIR_MASK (~(PGDIR_SIZE-1))
2660 +
2661 +/* PMD_SHIFT determines the size of the area a second-level page table can
2662 + * map
2663 + */
2664 +
2665 +#define PMD_SHIFT 21
2666 +#define PMD_SIZE (1UL << PMD_SHIFT)
2667 +#define PMD_MASK (~(PMD_SIZE-1))
2668 +
2669 +/*
2670 + * entries per page directory level
2671 + */
2672 +
2673 +#define PTRS_PER_PTE 512
2674 +#ifdef CONFIG_64BIT
2675 +#define PTRS_PER_PMD 512
2676 +#define PTRS_PER_PGD 512
2677 +#else
2678 +#define PTRS_PER_PMD 1024
2679 +#define PTRS_PER_PGD 1024
2680 +#endif
2681 +
2682 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
2683 +#define FIRST_USER_ADDRESS 0
2684 +
2685 +#define pte_ERROR(e) \
2686 + printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2687 + pte_val(e))
2688 +#define pmd_ERROR(e) \
2689 + printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2690 + pmd_val(e))
2691 +#define pgd_ERROR(e) \
2692 + printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
2693 + pgd_val(e))
2694 +
2695 +#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
2696 +#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
2697 +#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
2698 +#define pud_populate(mm, pud, pmd) \
2699 + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
2700 +
2701 +#ifdef CONFIG_64BIT
2702 +#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
2703 +#else
2704 +#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
2705 +#endif
2706 +
2707 +static inline int pgd_newpage(pgd_t pgd)
2708 +{
2709 + return(pgd_val(pgd) & _PAGE_NEWPAGE);
2710 +}
2711 +
2712 +static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
2713 +
2714 +#ifdef CONFIG_64BIT
2715 +#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
2716 +#else
2717 +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
2718 +#endif
2719 +
2720 +struct mm_struct;
2721 +extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
2722 +
2723 +static inline void pud_clear (pud_t *pud)
2724 +{
2725 + set_pud(pud, __pud(_PAGE_NEWPAGE));
2726 +}
2727 +
2728 +#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
2729 +#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
2730 +
2731 +/* Find an entry in the second-level page table.. */
2732 +#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
2733 + pmd_index(address))
2734 +
2735 +static inline unsigned long pte_pfn(pte_t pte)
2736 +{
2737 + return phys_to_pfn(pte_val(pte));
2738 +}
2739 +
2740 +static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
2741 +{
2742 + pte_t pte;
2743 + phys_t phys = pfn_to_phys(page_nr);
2744 +
2745 + pte_set_val(pte, phys, pgprot);
2746 + return pte;
2747 +}
2748 +
2749 +static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
2750 +{
2751 + return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
2752 +}
2753 +
2754 +/*
2755 + * Bits 0 through 3 are taken in the low part of the pte,
2756 + * put the 32 bits of offset into the high part.
2757 + */
2758 +#define PTE_FILE_MAX_BITS 32
2759 +
2760 +#ifdef CONFIG_64BIT
2761 +
2762 +#define pte_to_pgoff(p) ((p).pte >> 32)
2763 +
2764 +#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
2765 +
2766 +#else
2767 +
2768 +#define pte_to_pgoff(pte) ((pte).pte_high)
2769 +
2770 +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
2771 +
2772 +#endif
2773 +
2774 +#endif
2775 +
2776 --- /dev/null
2777 +++ b/arch/um/include/uapi/asm/pgtable.h
2778 @@ -0,0 +1,375 @@
2779 +/*
2780 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2781 + * Copyright 2003 PathScale, Inc.
2782 + * Derived from include/asm-i386/pgtable.h
2783 + * Licensed under the GPL
2784 + */
2785 +
2786 +#ifndef __UM_PGTABLE_H
2787 +#define __UM_PGTABLE_H
2788 +
2789 +#include <asm/fixmap.h>
2790 +
2791 +#define _PAGE_PRESENT 0x001
2792 +#define _PAGE_NEWPAGE 0x002
2793 +#define _PAGE_NEWPROT 0x004
2794 +#define _PAGE_RW 0x020
2795 +#define _PAGE_USER 0x040
2796 +#define _PAGE_ACCESSED 0x080
2797 +#define _PAGE_DIRTY 0x100
2798 +/* If _PAGE_PRESENT is clear, we use these: */
2799 +#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
2800 +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
2801 + pte_present gives true */
2802 +
2803 +#ifdef CONFIG_3_LEVEL_PGTABLES
2804 +#include <asm/pgtable-3level.h>
2805 +#else
2806 +#include <asm/pgtable-2level.h>
2807 +#endif
2808 +
2809 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
2810 +
2811 +/* zero page used for uninitialized stuff */
2812 +extern unsigned long *empty_zero_page;
2813 +
2814 +#define pgtable_cache_init() do ; while (0)
2815 +
2816 +/* Just any arbitrary offset to the start of the vmalloc VM area: the
2817 + * current 8MB value just means that there will be a 8MB "hole" after the
2818 + * physical memory until the kernel virtual memory starts. That means that
2819 + * any out-of-bounds memory accesses will hopefully be caught.
2820 + * The vmalloc() routines leaves a hole of 4kB between each vmalloced
2821 + * area for the same reason. ;)
2822 + */
2823 +
2824 +extern unsigned long end_iomem;
2825 +
2826 +#define VMALLOC_OFFSET (__va_space)
2827 +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
2828 +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
2829 +#ifdef CONFIG_HIGHMEM
2830 +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
2831 +#else
2832 +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
2833 +#endif
2834 +#define MODULES_VADDR VMALLOC_START
2835 +#define MODULES_END VMALLOC_END
2836 +#define MODULES_LEN (MODULES_VADDR - MODULES_END)
2837 +
2838 +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
2839 +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
2840 +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
2841 +#define __PAGE_KERNEL_EXEC \
2842 + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
2843 +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
2844 +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
2845 +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
2846 +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
2847 +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
2848 +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
2849 +
2850 +/*
2851 + * The i386 can't do page protection for execute, and considers that the same
2852 + * are read.
2853 + * Also, write permissions imply read permissions. This is the closest we can
2854 + * get..
2855 + */
2856 +#define __P000 PAGE_NONE
2857 +#define __P001 PAGE_READONLY
2858 +#define __P010 PAGE_COPY
2859 +#define __P011 PAGE_COPY
2860 +#define __P100 PAGE_READONLY
2861 +#define __P101 PAGE_READONLY
2862 +#define __P110 PAGE_COPY
2863 +#define __P111 PAGE_COPY
2864 +
2865 +#define __S000 PAGE_NONE
2866 +#define __S001 PAGE_READONLY
2867 +#define __S010 PAGE_SHARED
2868 +#define __S011 PAGE_SHARED
2869 +#define __S100 PAGE_READONLY
2870 +#define __S101 PAGE_READONLY
2871 +#define __S110 PAGE_SHARED
2872 +#define __S111 PAGE_SHARED
2873 +
2874 +/*
2875 + * ZERO_PAGE is a global shared page that is always zero: used
2876 + * for zero-mapped memory areas etc..
2877 + */
2878 +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
2879 +
2880 +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
2881 +
2882 +#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
2883 +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
2884 +
2885 +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
2886 +#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
2887 +
2888 +#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
2889 +#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
2890 +
2891 +#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
2892 +#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
2893 +
2894 +#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
2895 +
2896 +#define pte_page(x) pfn_to_page(pte_pfn(x))
2897 +
2898 +#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
2899 +
2900 +/*
2901 + * =================================
2902 + * Flags checking section.
2903 + * =================================
2904 + */
2905 +
2906 +static inline int pte_none(pte_t pte)
2907 +{
2908 + return pte_is_zero(pte);
2909 +}
2910 +
2911 +/*
2912 + * The following only work if pte_present() is true.
2913 + * Undefined behaviour if not..
2914 + */
2915 +static inline int pte_read(pte_t pte)
2916 +{
2917 + return((pte_get_bits(pte, _PAGE_USER)) &&
2918 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2919 +}
2920 +
2921 +static inline int pte_exec(pte_t pte){
2922 + return((pte_get_bits(pte, _PAGE_USER)) &&
2923 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2924 +}
2925 +
2926 +static inline int pte_write(pte_t pte)
2927 +{
2928 + return((pte_get_bits(pte, _PAGE_RW)) &&
2929 + !(pte_get_bits(pte, _PAGE_PROTNONE)));
2930 +}
2931 +
2932 +/*
2933 + * The following only works if pte_present() is not true.
2934 + */
2935 +static inline int pte_file(pte_t pte)
2936 +{
2937 + return pte_get_bits(pte, _PAGE_FILE);
2938 +}
2939 +
2940 +static inline int pte_dirty(pte_t pte)
2941 +{
2942 + return pte_get_bits(pte, _PAGE_DIRTY);
2943 +}
2944 +
2945 +static inline int pte_young(pte_t pte)
2946 +{
2947 + return pte_get_bits(pte, _PAGE_ACCESSED);
2948 +}
2949 +
2950 +static inline int pte_newpage(pte_t pte)
2951 +{
2952 + return pte_get_bits(pte, _PAGE_NEWPAGE);
2953 +}
2954 +
2955 +static inline int pte_newprot(pte_t pte)
2956 +{
2957 + return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
2958 +}
2959 +
2960 +static inline int pte_special(pte_t pte)
2961 +{
2962 + return 0;
2963 +}
2964 +
2965 +/*
2966 + * =================================
2967 + * Flags setting section.
2968 + * =================================
2969 + */
2970 +
2971 +static inline pte_t pte_mknewprot(pte_t pte)
2972 +{
2973 + pte_set_bits(pte, _PAGE_NEWPROT);
2974 + return(pte);
2975 +}
2976 +
2977 +static inline pte_t pte_mkclean(pte_t pte)
2978 +{
2979 + pte_clear_bits(pte, _PAGE_DIRTY);
2980 + return(pte);
2981 +}
2982 +
2983 +static inline pte_t pte_mkold(pte_t pte)
2984 +{
2985 + pte_clear_bits(pte, _PAGE_ACCESSED);
2986 + return(pte);
2987 +}
2988 +
2989 +static inline pte_t pte_wrprotect(pte_t pte)
2990 +{
2991 + pte_clear_bits(pte, _PAGE_RW);
2992 + return(pte_mknewprot(pte));
2993 +}
2994 +
2995 +static inline pte_t pte_mkread(pte_t pte)
2996 +{
2997 + pte_set_bits(pte, _PAGE_USER);
2998 + return(pte_mknewprot(pte));
2999 +}
3000 +
3001 +static inline pte_t pte_mkdirty(pte_t pte)
3002 +{
3003 + pte_set_bits(pte, _PAGE_DIRTY);
3004 + return(pte);
3005 +}
3006 +
3007 +static inline pte_t pte_mkyoung(pte_t pte)
3008 +{
3009 + pte_set_bits(pte, _PAGE_ACCESSED);
3010 + return(pte);
3011 +}
3012 +
3013 +static inline pte_t pte_mkwrite(pte_t pte)
3014 +{
3015 + pte_set_bits(pte, _PAGE_RW);
3016 + return(pte_mknewprot(pte));
3017 +}
3018 +
3019 +static inline pte_t pte_mkuptodate(pte_t pte)
3020 +{
3021 + pte_clear_bits(pte, _PAGE_NEWPAGE);
3022 + if(pte_present(pte))
3023 + pte_clear_bits(pte, _PAGE_NEWPROT);
3024 + return(pte);
3025 +}
3026 +
3027 +static inline pte_t pte_mknewpage(pte_t pte)
3028 +{
3029 + pte_set_bits(pte, _PAGE_NEWPAGE);
3030 + return(pte);
3031 +}
3032 +
3033 +static inline pte_t pte_mkspecial(pte_t pte)
3034 +{
3035 + return(pte);
3036 +}
3037 +
3038 +static inline void set_pte(pte_t *pteptr, pte_t pteval)
3039 +{
3040 + pte_copy(*pteptr, pteval);
3041 +
3042 + /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
3043 + * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
3044 + * mapped pages.
3045 + */
3046 +
3047 + *pteptr = pte_mknewpage(*pteptr);
3048 + if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
3049 +}
3050 +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
3051 +
3052 +#define __HAVE_ARCH_PTE_SAME
3053 +static inline int pte_same(pte_t pte_a, pte_t pte_b)
3054 +{
3055 + return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
3056 +}
3057 +
3058 +/*
3059 + * Conversion functions: convert a page and protection to a page entry,
3060 + * and a page entry and page directory to the page they refer to.
3061 + */
3062 +
3063 +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
3064 +#define __virt_to_page(virt) phys_to_page(__pa(virt))
3065 +#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
3066 +#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
3067 +
3068 +#define mk_pte(page, pgprot) \
3069 + ({ pte_t pte; \
3070 + \
3071 + pte_set_val(pte, page_to_phys(page), (pgprot)); \
3072 + if (pte_present(pte)) \
3073 + pte_mknewprot(pte_mknewpage(pte)); \
3074 + pte;})
3075 +
3076 +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
3077 +{
3078 + pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
3079 + return pte;
3080 +}
3081 +
3082 +/*
3083 + * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
3084 + *
3085 + * this macro returns the index of the entry in the pgd page which would
3086 + * control the given virtual address
3087 + */
3088 +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
3089 +
3090 +/*
3091 + * pgd_offset() returns a (pgd_t *)
3092 + * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
3093 + */
3094 +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
3095 +
3096 +/*
3097 + * a shortcut which implies the use of the kernel's pgd, instead
3098 + * of a process's
3099 + */
3100 +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
3101 +
3102 +/*
3103 + * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
3104 + *
3105 + * this macro returns the index of the entry in the pmd page which would
3106 + * control the given virtual address
3107 + */
3108 +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
3109 +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
3110 +
3111 +#define pmd_page_vaddr(pmd) \
3112 + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
3113 +
3114 +/*
3115 + * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
3116 + *
3117 + * this macro returns the index of the entry in the pte page which would
3118 + * control the given virtual address
3119 + */
3120 +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
3121 +#define pte_offset_kernel(dir, address) \
3122 + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
3123 +#define pte_offset_map(dir, address) \
3124 + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
3125 +#define pte_unmap(pte) do { } while (0)
3126 +
3127 +struct mm_struct;
3128 +extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
3129 +
3130 +#define update_mmu_cache(vma,address,ptep) do ; while (0)
3131 +
3132 +/* Encode and de-code a swap entry */
3133 +#define __swp_type(x) (((x).val >> 5) & 0x1f)
3134 +#define __swp_offset(x) ((x).val >> 11)
3135 +
3136 +#define __swp_entry(type, offset) \
3137 + ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
3138 +#define __pte_to_swp_entry(pte) \
3139 + ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
3140 +#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
3141 +
3142 +#define kern_addr_valid(addr) (1)
3143 +
3144 +#include <asm-generic/pgtable.h>
3145 +
3146 +/* Clear a kernel PTE and flush it from the TLB */
3147 +#define kpte_clear_flush(ptep, vaddr) \
3148 +do { \
3149 + pte_clear(&init_mm, (vaddr), (ptep)); \
3150 + __flush_tlb_one((vaddr)); \
3151 +} while (0)
3152 +
3153 +#endif
3154 --- /dev/null
3155 +++ b/arch/um/include/uapi/asm/processor-generic.h
3156 @@ -0,0 +1,115 @@
3157 +/*
3158 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3159 + * Licensed under the GPL
3160 + */
3161 +
3162 +#ifndef __UM_PROCESSOR_GENERIC_H
3163 +#define __UM_PROCESSOR_GENERIC_H
3164 +
3165 +struct pt_regs;
3166 +
3167 +struct task_struct;
3168 +
3169 +#include <asm/ptrace.h>
3170 +#include <registers.h>
3171 +#include <sysdep/archsetjmp.h>
3172 +
3173 +#include <linux/prefetch.h>
3174 +
3175 +struct mm_struct;
3176 +
3177 +struct thread_struct {
3178 + struct pt_regs regs;
3179 + struct pt_regs *segv_regs;
3180 + int singlestep_syscall;
3181 + void *fault_addr;
3182 + jmp_buf *fault_catcher;
3183 + struct task_struct *prev_sched;
3184 + struct arch_thread arch;
3185 + jmp_buf switch_buf;
3186 + struct {
3187 + int op;
3188 + union {
3189 + struct {
3190 + int pid;
3191 + } fork, exec;
3192 + struct {
3193 + int (*proc)(void *);
3194 + void *arg;
3195 + } thread;
3196 + struct {
3197 + void (*proc)(void *);
3198 + void *arg;
3199 + } cb;
3200 + } u;
3201 + } request;
3202 +};
3203 +
3204 +#define INIT_THREAD \
3205 +{ \
3206 + .regs = EMPTY_REGS, \
3207 + .fault_addr = NULL, \
3208 + .prev_sched = NULL, \
3209 + .arch = INIT_ARCH_THREAD, \
3210 + .request = { 0 } \
3211 +}
3212 +
3213 +static inline void release_thread(struct task_struct *task)
3214 +{
3215 +}
3216 +
3217 +extern unsigned long thread_saved_pc(struct task_struct *t);
3218 +
3219 +static inline void mm_copy_segments(struct mm_struct *from_mm,
3220 + struct mm_struct *new_mm)
3221 +{
3222 +}
3223 +
3224 +#define init_stack (init_thread_union.stack)
3225 +
3226 +/*
3227 + * User space process size: 3GB (default).
3228 + */
3229 +extern unsigned long task_size;
3230 +
3231 +#define TASK_SIZE (task_size)
3232 +
3233 +#undef STACK_TOP
3234 +#undef STACK_TOP_MAX
3235 +
3236 +extern unsigned long stacksizelim;
3237 +
3238 +#define STACK_ROOM (stacksizelim)
3239 +#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
3240 +#define STACK_TOP_MAX STACK_TOP
3241 +
3242 +/* This decides where the kernel will search for a free chunk of vm
3243 + * space during mmap's.
3244 + */
3245 +#define TASK_UNMAPPED_BASE (0x40000000)
3246 +
3247 +extern void start_thread(struct pt_regs *regs, unsigned long entry,
3248 + unsigned long stack);
3249 +
3250 +struct cpuinfo_um {
3251 + unsigned long loops_per_jiffy;
3252 + int ipi_pipe[2];
3253 +};
3254 +
3255 +extern struct cpuinfo_um boot_cpu_data;
3256 +
3257 +#define my_cpu_data cpu_data[smp_processor_id()]
3258 +
3259 +#ifdef CONFIG_SMP
3260 +extern struct cpuinfo_um cpu_data[];
3261 +#define current_cpu_data cpu_data[smp_processor_id()]
3262 +#else
3263 +#define cpu_data (&boot_cpu_data)
3264 +#define current_cpu_data boot_cpu_data
3265 +#endif
3266 +
3267 +
3268 +#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
3269 +extern unsigned long get_wchan(struct task_struct *p);
3270 +
3271 +#endif
3272 --- /dev/null
3273 +++ b/arch/um/include/uapi/asm/ptrace-generic.h
3274 @@ -0,0 +1,45 @@
3275 +/*
3276 + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3277 + * Licensed under the GPL
3278 + */
3279 +
3280 +#ifndef __UM_PTRACE_GENERIC_H
3281 +#define __UM_PTRACE_GENERIC_H
3282 +
3283 +#ifndef __ASSEMBLY__
3284 +
3285 +#include <asm/ptrace-abi.h>
3286 +#include <sysdep/ptrace.h>
3287 +
3288 +struct pt_regs {
3289 + struct uml_pt_regs regs;
3290 +};
3291 +
3292 +#define arch_has_single_step() (1)
3293 +
3294 +#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
3295 +
3296 +#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
3297 +#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
3298 +
3299 +#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
3300 +
3301 +#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
3302 +
3303 +#define instruction_pointer(regs) PT_REGS_IP(regs)
3304 +
3305 +struct task_struct;
3306 +
3307 +extern long subarch_ptrace(struct task_struct *child, long request,
3308 + unsigned long addr, unsigned long data);
3309 +extern unsigned long getreg(struct task_struct *child, int regno);
3310 +extern int putreg(struct task_struct *child, int regno, unsigned long value);
3311 +
3312 +extern int arch_copy_tls(struct task_struct *new);
3313 +extern void clear_flushed_tls(struct task_struct *task);
3314 +extern void syscall_trace_enter(struct pt_regs *regs);
3315 +extern void syscall_trace_leave(struct pt_regs *regs);
3316 +
3317 +#endif
3318 +
3319 +#endif
3320 --- /dev/null
3321 +++ b/arch/um/include/uapi/asm/setup.h
3322 @@ -0,0 +1,10 @@
3323 +#ifndef SETUP_H_INCLUDED
3324 +#define SETUP_H_INCLUDED
3325 +
3326 +/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
3327 + * command line, so this choice is ok.
3328 + */
3329 +
3330 +#define COMMAND_LINE_SIZE 4096
3331 +
3332 +#endif /* SETUP_H_INCLUDED */
3333 --- /dev/null
3334 +++ b/arch/um/include/uapi/asm/smp.h
3335 @@ -0,0 +1,32 @@
3336 +#ifndef __UM_SMP_H
3337 +#define __UM_SMP_H
3338 +
3339 +#ifdef CONFIG_SMP
3340 +
3341 +#include <linux/bitops.h>
3342 +#include <asm/current.h>
3343 +#include <linux/cpumask.h>
3344 +
3345 +#define raw_smp_processor_id() (current_thread->cpu)
3346 +
3347 +#define cpu_logical_map(n) (n)
3348 +#define cpu_number_map(n) (n)
3349 +extern int hard_smp_processor_id(void);
3350 +#define NO_PROC_ID -1
3351 +
3352 +extern int ncpus;
3353 +
3354 +
3355 +static inline void smp_cpus_done(unsigned int maxcpus)
3356 +{
3357 +}
3358 +
3359 +extern struct task_struct *idle_threads[NR_CPUS];
3360 +
3361 +#else
3362 +
3363 +#define hard_smp_processor_id() 0
3364 +
3365 +#endif
3366 +
3367 +#endif
3368 --- /dev/null
3369 +++ b/arch/um/include/uapi/asm/sysrq.h
3370 @@ -0,0 +1,7 @@
3371 +#ifndef __UM_SYSRQ_H
3372 +#define __UM_SYSRQ_H
3373 +
3374 +struct task_struct;
3375 +extern void show_trace(struct task_struct* task, unsigned long *stack);
3376 +
3377 +#endif
3378 --- /dev/null
3379 +++ b/arch/um/include/uapi/asm/thread_info.h
3380 @@ -0,0 +1,78 @@
3381 +/*
3382 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3383 + * Licensed under the GPL
3384 + */
3385 +
3386 +#ifndef __UM_THREAD_INFO_H
3387 +#define __UM_THREAD_INFO_H
3388 +
3389 +#ifndef __ASSEMBLY__
3390 +
3391 +#include <asm/types.h>
3392 +#include <asm/page.h>
3393 +#include <asm/uaccess.h>
3394 +
3395 +struct thread_info {
3396 + struct task_struct *task; /* main task structure */
3397 + struct exec_domain *exec_domain; /* execution domain */
3398 + unsigned long flags; /* low level flags */
3399 + __u32 cpu; /* current CPU */
3400 + int preempt_count; /* 0 => preemptable,
3401 + <0 => BUG */
3402 + mm_segment_t addr_limit; /* thread address space:
3403 + 0-0xBFFFFFFF for user
3404 + 0-0xFFFFFFFF for kernel */
3405 + struct restart_block restart_block;
3406 + struct thread_info *real_thread; /* Points to non-IRQ stack */
3407 +};
3408 +
3409 +#define INIT_THREAD_INFO(tsk) \
3410 +{ \
3411 + .task = &tsk, \
3412 + .exec_domain = &default_exec_domain, \
3413 + .flags = 0, \
3414 + .cpu = 0, \
3415 + .preempt_count = INIT_PREEMPT_COUNT, \
3416 + .addr_limit = KERNEL_DS, \
3417 + .restart_block = { \
3418 + .fn = do_no_restart_syscall, \
3419 + }, \
3420 + .real_thread = NULL, \
3421 +}
3422 +
3423 +#define init_thread_info (init_thread_union.thread_info)
3424 +#define init_stack (init_thread_union.stack)
3425 +
3426 +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
3427 +/* how to get the thread information struct from C */
3428 +static inline struct thread_info *current_thread_info(void)
3429 +{
3430 + struct thread_info *ti;
3431 + unsigned long mask = THREAD_SIZE - 1;
3432 + void *p;
3433 +
3434 + asm volatile ("" : "=r" (p) : "0" (&ti));
3435 + ti = (struct thread_info *) (((unsigned long)p) & ~mask);
3436 + return ti;
3437 +}
3438 +
3439 +#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
3440 +
3441 +#endif
3442 +
3443 +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
3444 +#define TIF_SIGPENDING 1 /* signal pending */
3445 +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
3446 +#define TIF_RESTART_BLOCK 4
3447 +#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
3448 +#define TIF_SYSCALL_AUDIT 6
3449 +#define TIF_RESTORE_SIGMASK 7
3450 +#define TIF_NOTIFY_RESUME 8
3451 +
3452 +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
3453 +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
3454 +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
3455 +#define _TIF_MEMDIE (1 << TIF_MEMDIE)
3456 +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
3457 +
3458 +#endif
3459 --- /dev/null
3460 +++ b/arch/um/include/uapi/asm/timex.h
3461 @@ -0,0 +1,13 @@
3462 +#ifndef __UM_TIMEX_H
3463 +#define __UM_TIMEX_H
3464 +
3465 +typedef unsigned long cycles_t;
3466 +
3467 +static inline cycles_t get_cycles (void)
3468 +{
3469 + return 0;
3470 +}
3471 +
3472 +#define CLOCK_TICK_RATE (HZ)
3473 +
3474 +#endif
3475 --- /dev/null
3476 +++ b/arch/um/include/uapi/asm/tlb.h
3477 @@ -0,0 +1,122 @@
3478 +#ifndef __UM_TLB_H
3479 +#define __UM_TLB_H
3480 +
3481 +#include <linux/pagemap.h>
3482 +#include <linux/swap.h>
3483 +#include <asm/percpu.h>
3484 +#include <asm/pgalloc.h>
3485 +#include <asm/tlbflush.h>
3486 +
3487 +#define tlb_start_vma(tlb, vma) do { } while (0)
3488 +#define tlb_end_vma(tlb, vma) do { } while (0)
3489 +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
3490 +
3491 +/* struct mmu_gather is an opaque type used by the mm code for passing around
3492 + * any data needed by arch specific code for tlb_remove_page.
3493 + */
3494 +struct mmu_gather {
3495 + struct mm_struct *mm;
3496 + unsigned int need_flush; /* Really unmapped some ptes? */
3497 + unsigned long start;
3498 + unsigned long end;
3499 + unsigned int fullmm; /* non-zero means full mm flush */
3500 +};
3501 +
3502 +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
3503 + unsigned long address)
3504 +{
3505 + if (tlb->start > address)
3506 + tlb->start = address;
3507 + if (tlb->end < address + PAGE_SIZE)
3508 + tlb->end = address + PAGE_SIZE;
3509 +}
3510 +
3511 +static inline void init_tlb_gather(struct mmu_gather *tlb)
3512 +{
3513 + tlb->need_flush = 0;
3514 +
3515 + tlb->start = TASK_SIZE;
3516 + tlb->end = 0;
3517 +
3518 + if (tlb->fullmm) {
3519 + tlb->start = 0;
3520 + tlb->end = TASK_SIZE;
3521 + }
3522 +}
3523 +
3524 +static inline void
3525 +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
3526 +{
3527 + tlb->mm = mm;
3528 + tlb->start = start;
3529 + tlb->end = end;
3530 + tlb->fullmm = !(start | (end+1));
3531 +
3532 + init_tlb_gather(tlb);
3533 +}
3534 +
3535 +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
3536 + unsigned long end);
3537 +
3538 +static inline void
3539 +tlb_flush_mmu(struct mmu_gather *tlb)
3540 +{
3541 + if (!tlb->need_flush)
3542 + return;
3543 +
3544 + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
3545 + init_tlb_gather(tlb);
3546 +}
3547 +
3548 +/* tlb_finish_mmu
3549 + * Called at the end of the shootdown operation to free up any resources
3550 + * that were required.
3551 + */
3552 +static inline void
3553 +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
3554 +{
3555 + tlb_flush_mmu(tlb);
3556 +
3557 + /* keep the page table cache within bounds */
3558 + check_pgt_cache();
3559 +}
3560 +
3561 +/* tlb_remove_page
3562 + * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
3563 + * while handling the additional races in SMP caused by other CPUs
3564 + * caching valid mappings in their TLBs.
3565 + */
3566 +static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
3567 +{
3568 + tlb->need_flush = 1;
3569 + free_page_and_swap_cache(page);
3570 + return 1; /* avoid calling tlb_flush_mmu */
3571 +}
3572 +
3573 +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
3574 +{
3575 + __tlb_remove_page(tlb, page);
3576 +}
3577 +
3578 +/**
3579 + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
3580 + *
3581 + * Record the fact that pte's were really umapped in ->need_flush, so we can
3582 + * later optimise away the tlb invalidate. This helps when userspace is
3583 + * unmapping already-unmapped pages, which happens quite a lot.
3584 + */
3585 +#define tlb_remove_tlb_entry(tlb, ptep, address) \
3586 + do { \
3587 + tlb->need_flush = 1; \
3588 + __tlb_remove_tlb_entry(tlb, ptep, address); \
3589 + } while (0)
3590 +
3591 +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
3592 +
3593 +#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
3594 +
3595 +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
3596 +
3597 +#define tlb_migrate_finish(mm) do {} while (0)
3598 +
3599 +#endif
3600 --- /dev/null
3601 +++ b/arch/um/include/uapi/asm/tlbflush.h
3602 @@ -0,0 +1,31 @@
3603 +/*
3604 + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3605 + * Licensed under the GPL
3606 + */
3607 +
3608 +#ifndef __UM_TLBFLUSH_H
3609 +#define __UM_TLBFLUSH_H
3610 +
3611 +#include <linux/mm.h>
3612 +
3613 +/*
3614 + * TLB flushing:
3615 + *
3616 + * - flush_tlb() flushes the current mm struct TLBs
3617 + * - flush_tlb_all() flushes all processes TLBs
3618 + * - flush_tlb_mm(mm) flushes the specified mm context TLB's
3619 + * - flush_tlb_page(vma, vmaddr) flushes one page
3620 + * - flush_tlb_kernel_vm() flushes the kernel vm area
3621 + * - flush_tlb_range(vma, start, end) flushes a range of pages
3622 + */
3623 +
3624 +extern void flush_tlb_all(void);
3625 +extern void flush_tlb_mm(struct mm_struct *mm);
3626 +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
3627 + unsigned long end);
3628 +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
3629 +extern void flush_tlb_kernel_vm(void);
3630 +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
3631 +extern void __flush_tlb_one(unsigned long addr);
3632 +
3633 +#endif
3634 --- /dev/null
3635 +++ b/arch/um/include/uapi/asm/uaccess.h
3636 @@ -0,0 +1,178 @@
3637 +/*
3638 + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3639 + * Licensed under the GPL
3640 + */
3641 +
3642 +#ifndef __UM_UACCESS_H
3643 +#define __UM_UACCESS_H
3644 +
3645 +/* thread_info has a mm_segment_t in it, so put the definition up here */
3646 +typedef struct {
3647 + unsigned long seg;
3648 +} mm_segment_t;
3649 +
3650 +#include <linux/thread_info.h>
3651 +#include <linux/errno.h>
3652 +#include <asm/processor.h>
3653 +#include <asm/elf.h>
3654 +
3655 +#define VERIFY_READ 0
3656 +#define VERIFY_WRITE 1
3657 +
3658 +/*
3659 + * The fs value determines whether argument validity checking should be
3660 + * performed or not. If get_fs() == USER_DS, checking is performed, with
3661 + * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE)
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#define __under_task_size(addr, size) \
+	(((unsigned long) (addr) < TASK_SIZE) && \
+	 (((unsigned long) (addr) + (size)) < TASK_SIZE))
+
+#define __access_ok_vsyscall(type, addr, size) \
+	((type == VERIFY_READ) && \
+	 ((unsigned long) (addr) >= FIXADDR_USER_START) && \
+	 ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
+	 ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
+
+#define __addr_range_nowrap(addr, size) \
+	((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
+
+#define access_ok(type, addr, size) \
+	(__addr_range_nowrap(addr, size) && \
+	 (__under_task_size(addr, size) || \
+	  __access_ok_vsyscall(type, addr, size) || \
+	  segment_eq(get_fs(), KERNEL_DS)))
+
+extern int copy_from_user(void *to, const void __user *from, int n);
+extern int copy_to_user(void __user *to, const void *from, int n);
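
Taken together: access_ok() accepts a range that fits under TASK_SIZE, lies on the vsyscall page (reads only), or is exempted by KERNEL_DS, and the copy routines then do the transfer, returning the number of bytes left uncopied. A minimal sketch; example_read_word() is a hypothetical helper, not part of the patch.

/* Hypothetical: validate, then copy one word in from userspace. */
static int example_read_word(const unsigned long __user *uptr,
			     unsigned long *out)
{
	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
		return -EFAULT;
	return copy_from_user(out, uptr, sizeof(*uptr)) ? -EFAULT : 0;
}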
+
+/*
+ * strncpy_from_user: - Copy a NUL terminated string from userspace.
+ * @dst:   Destination address, in kernel space.  This buffer must be at
+ *         least @count bytes long.
+ * @src:   Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+
+extern int strncpy_from_user(char *dst, const char __user *src, int count);
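
A sketch of the three documented outcomes; example_get_name() is a hypothetical helper, not part of the patch.

/* Hypothetical: fault, truncation, and success cases of strncpy_from_user. */
static int example_get_name(const char __user *uname, char *buf, int len)
{
	int n = strncpy_from_user(buf, uname, len);

	if (n < 0)
		return -EFAULT;		/* faulted while reading userspace */
	if (n == len)
		return -ENAMETOOLONG;	/* no NUL found within @len bytes */
	return n;			/* string length; NUL was copied too */
}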
+
+/*
+ * __clear_user: - Zero a block of memory in user space, with less checking.
+ * @mem: Destination address, in user space.
+ * @len: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space.  The caller must first check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns the number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+extern int __clear_user(void __user *mem, int len);
+
+/*
+ * clear_user: - Zero a block of memory in user space.
+ * @mem: Destination address, in user space.
+ * @len: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space.
+ *
+ * Returns the number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+extern int clear_user(void __user *mem, int len);
+
+/*
+ * strnlen_user: - Get the size of a string in user space.
+ * @str: The string to measure.
+ * @len: The maximum valid length.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ * If the string is too long, returns a value greater than @len.
+ */
+extern int strnlen_user(const void __user *str, int len);
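
The usual pattern around strnlen_user() distinguishes its three return classes; example_len_ok() is a hypothetical helper, not part of the patch.

/* Hypothetical: bound a user string before copying or allocating for it. */
static int example_len_ok(const char __user *ustr, int max)
{
	int n = strnlen_user(ustr, max);	/* counts the NUL on success */

	if (n == 0)
		return -EFAULT;			/* faulted in userspace */
	if (n > max)
		return -EINVAL;			/* longer than we allow */
	return n;
}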
+
+#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
+
+#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+#define __get_user(x, ptr) \
+({ \
+	const __typeof__(*(ptr)) __user *__private_ptr = (ptr);	\
+	__typeof__(x) __private_val;					\
+	int __private_ret = -EFAULT;					\
+	(x) = (__typeof__(*(__private_ptr)))0;				\
+	if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
+			     sizeof(*(__private_ptr))) == 0) {		\
+		(x) = (__typeof__(*(__private_ptr))) __private_val;	\
+		__private_ret = 0;					\
+	}								\
+	__private_ret;							\
+})
+
+#define get_user(x, ptr) \
+({ \
+	const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
+	(access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
+	 __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
+})
+
+#define __put_user(x, ptr) \
+({ \
+	__typeof__(*(ptr)) __user *__private_ptr = ptr; \
+	__typeof__(*(__private_ptr)) __private_val; \
+	int __private_ret = -EFAULT; \
+	__private_val = (__typeof__(*(__private_ptr))) (x); \
+	if (__copy_to_user((__private_ptr), &__private_val, \
+			   sizeof(*(__private_ptr))) == 0) { \
+		__private_ret = 0; \
+	} \
+	__private_ret; \
+})
+
+#define put_user(x, ptr) \
+({ \
+	__typeof__(*(ptr)) __user *private_ptr = (ptr); \
+	(access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
+	 __put_user(x, private_ptr) : -EFAULT); \
+})
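
Typical caller-side use of the checked accessors above: both return 0 on success and -EFAULT on a bad pointer. example_bump() is a hypothetical helper, not part of the patch.

/* Hypothetical: read a user int, increment it, write it back. */
static int example_bump(int __user *uptr)
{
	int v;

	if (get_user(v, uptr))		/* -EFAULT leaves v zeroed */
		return -EFAULT;
	return put_user(v + 1, uptr);
}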
+
+#define strlen_user(str) strnlen_user(str, ~0U >> 1)
+
+struct exception_table_entry
+{
+	unsigned long insn;
+	unsigned long fixup;
+};
+
+#endif
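
Each exception_table_entry pairs a faulting instruction address (insn) with a recovery address (fixup): on a fault in a uaccess routine, the handler searches the table and resumes at the fixup stub instead of treating it as a kernel crash. A rough sketch of the consuming side, assuming the generic search_exception_tables() from kernel/extable.c; example_fixup_fault() and its resume_ip out-parameter are hypothetical.

/* Hypothetical: how a page-fault path consults the exception table. */
extern const struct exception_table_entry *
search_exception_tables(unsigned long addr);

static int example_fixup_fault(unsigned long faulting_ip,
			       unsigned long *resume_ip)
{
	const struct exception_table_entry *e =
		search_exception_tables(faulting_ip);

	if (!e)
		return 0;		/* no fixup: genuine kernel fault */
	*resume_ip = e->fixup;		/* continue at the fixup stub */
	return 1;
}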