[MIPS] Implement flush_anon_page().
arch/mips/mm/init.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * Atomicity and interruptibility: on SMTC the TLB is shared by all thread
 * contexts on a core, so rewriting entries must be protected not only from
 * local interrupts but also from the other VPEs, hence the dvpe()/evpe()
 * pair around the critical sections below.
 */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since these pages are never written to after initialization,
 * we don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}

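/*
 * For illustration: consumers pick the correctly coloured zero page via the
 * ZERO_PAGE() macro in asm/pgtable.h, which (roughly) selects within the
 * block allocated above using zero_page_mask:
 *
 *	#define ZERO_PAGE(vaddr) \
 *		(virt_to_page((void *)(empty_zero_page + \
 *			((unsigned long)(vaddr) & zero_page_mask))))
 *
 * This is a sketch of the idea, not necessarily the exact definition.
 */
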
/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as the hint.
 */

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

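/*
 * Map a user page at a fixmap virtual address whose cache colour matches
 * the user-space address, so a virtually indexed cache sees no alias.
 * The mapping is installed directly as a TLB entry (wired, on non-SMTC)
 * with interrupts disabled; kunmap_coherent() tears it down again.
 */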
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id();
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	entrylo = pte.pte_high;
#else
	entrylo = pte_val(pte) >> 6;
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

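/*
 * Build an EntryHi value in CKSEG0 that is unique per TLB index, so the
 * entry being torn down below can never match a genuine mapped address.
 */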
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

void kunmap_coherent(struct page *page)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

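/*
 * Copy a user highmem page.  On CPUs with cache aliases the source is
 * mapped through kmap_coherent() so we read through the same cache colour
 * the user will use; the destination is flushed if it may alias the user
 * mapping or if an executable mapping needs I-cache/D-cache coherence.
 */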
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(from);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

EXPORT_SYMBOL(copy_user_highpage);

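/*
 * Used e.g. by the ptrace / access_process_vm path to write into a page
 * that is also mapped in user space.  With aliasing D-caches the write goes
 * through a colour-matched kernel mapping so the user mapping observes the
 * new data; executable mappings additionally need the I-cache flushed.
 */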
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(page);
	} else
		memcpy(dst, src, len);
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

EXPORT_SYMBOL(copy_to_user_page);

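/*
 * The read-side counterpart: with aliasing D-caches, read through a
 * colour-matched mapping so we see exactly what the user mapping sees.
 * Nothing is modified, so no flushing is needed afterwards.
 */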
void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases) {
		void *vfrom =
			kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(page);
	} else
		memcpy(dst, src, len);
}

EXPORT_SYMBOL(copy_from_user_page);

#ifdef CONFIG_HIGHMEM
unsigned long highstart_pfn, highend_pfn;

pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

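/*
 * Pre-allocate the page-table levels covering [start, end) so that the
 * fixmap-based mappings above (kmap_atomic, kmap_coherent) can later
 * install PTEs without having to allocate page tables on the fly.
 */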
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					if (pte != pte_offset_kernel(pmd, 0))
						BUG();
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

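/*
 * Set up the zone sizes from the PFN limits established during bootmem
 * setup.  On non-flat memory maps, pages that page_is_ram() rejects are
 * accounted as holes so the allocator never hands them out.
 */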
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
#ifndef CONFIG_FLATMEM
	unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
	unsigned long i, j, pfn;
#endif

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ISA
	if (max_low_pfn >= MAX_DMA_PFN)
		if (min_low_pfn >= MAX_DMA_PFN) {
			zones_size[ZONE_DMA] = 0;
			zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
		} else {
			zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
			zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
		}
	else
#endif
	zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;

	if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       zones_size[ZONE_HIGHMEM] << (PAGE_SHIFT - 10));
		zones_size[ZONE_HIGHMEM] = 0;
	}
#endif

#ifdef CONFIG_FLATMEM
	free_area_init(zones_size);
#else
	pfn = min_low_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++)
		for (j = 0; j < zones_size[i]; j++, pfn++)
			if (!page_is_ram(pfn))
				zholes_size[i]++;
	free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = mem_map + tmp;

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
#ifdef CONFIG_LIMITED_DMA
		set_page_address(page, lowmem_page_address(page));
#endif
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

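/*
 * Return the pages of the physical range [begin, end) to the page
 * allocator, poisoning their contents first so that use-after-free of
 * init memory shows up as a recognisable pattern.
 */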
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

483
484 #ifdef CONFIG_BLK_DEV_INITRD
485 void free_initrd_mem(unsigned long start, unsigned long end)
486 {
487 free_init_pages("initrd memory",
488 virt_to_phys((void *)start),
489 virt_to_phys((void *)end));
490 }
491 #endif
492
493 void free_initmem(void)
494 {
495 prom_free_prom_memory();
496 free_init_pages("unused kernel memory",
497 __pa_symbol(&__init_begin),
498 __pa_symbol(&__init_end));
499 }
500
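/*
 * The statically allocated, page-aligned tables below form the initial
 * kernel page-table hierarchy; invalid_pmd_table / invalid_pte_table act
 * as the shared "empty" lower levels that unpopulated entries point at.
 */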
unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE << (order))))
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#ifdef CONFIG_64BIT
#ifdef MODULE_START
pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);