print kernel message when cache workarounds are enabled
[openwrt/staging/dedeckeh.git] / openwrt / target / linux / linux-2.4 / patches / brcm / 003-bcm47xx_cache_fixes.patch
1 diff -urN linux.old/arch/mips/kernel/entry.S linux.dev/arch/mips/kernel/entry.S
2 --- linux.old/arch/mips/kernel/entry.S 2005-07-05 16:46:49.000000000 +0200
3 +++ linux.dev/arch/mips/kernel/entry.S 2005-07-05 16:42:36.000000000 +0200
4 @@ -100,6 +100,10 @@
5 * and R4400 SC and MC versions.
6 */
7 NESTED(except_vec3_generic, 0, sp)
8 +#ifdef CONFIG_BCM4710
9 + nop
10 + nop
11 +#endif
12 #if R5432_CP0_INTERRUPT_WAR
13 mfc0 k0, CP0_INDEX
14 #endif
15 diff -urN linux.old/arch/mips/mm/c-r4k.c linux.dev/arch/mips/mm/c-r4k.c
16 --- linux.old/arch/mips/mm/c-r4k.c 2005-07-05 16:46:49.000000000 +0200
17 +++ linux.dev/arch/mips/mm/c-r4k.c 2005-07-05 16:48:47.000000000 +0200
18 @@ -14,6 +14,12 @@
19 #include <linux/mm.h>
20 #include <linux/bitops.h>
21
22 +#ifdef CONFIG_BCM4710
23 +#include "../bcm947xx/include/typedefs.h"
24 +#include "../bcm947xx/include/sbconfig.h"
25 +#include <asm/paccess.h>
26 +#endif
27 +
28 #include <asm/bcache.h>
29 #include <asm/bootinfo.h>
30 #include <asm/cacheops.h>
31 @@ -40,6 +46,7 @@
32 .bc_inv = (void *)no_sc_noop
33 };
34
35 +static int bcm4710 = 0;
36 struct bcache_ops *bcops = &no_sc_ops;
37
38 #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x2010)
39 @@ -64,8 +71,10 @@
40 static inline void r4k_blast_dcache_page_setup(void)
41 {
42 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
43 -
44 - if (dc_lsize == 16)
45 +
46 + if (bcm4710)
47 + r4k_blast_dcache_page = blast_dcache_page;
48 + else if (dc_lsize == 16)
49 r4k_blast_dcache_page = blast_dcache16_page;
50 else if (dc_lsize == 32)
51 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
52 @@ -77,7 +86,9 @@
53 {
54 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
55
56 - if (dc_lsize == 16)
57 + if (bcm4710)
58 + r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
59 + else if (dc_lsize == 16)
60 r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
61 else if (dc_lsize == 32)
62 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
63 @@ -89,7 +100,9 @@
64 {
65 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
66
67 - if (dc_lsize == 16)
68 + if (bcm4710)
69 + r4k_blast_dcache = blast_dcache;
70 + else if (dc_lsize == 16)
71 r4k_blast_dcache = blast_dcache16;
72 else if (dc_lsize == 32)
73 r4k_blast_dcache = blast_dcache32;
74 @@ -266,6 +279,7 @@
75 r4k_blast_dcache();
76 r4k_blast_icache();
77
78 + if (!bcm4710)
79 switch (current_cpu_data.cputype) {
80 case CPU_R4000SC:
81 case CPU_R4000MC:
82 @@ -304,10 +318,10 @@
83 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
84 * only flush the primary caches but R10000 and R12000 behave sane ...
85 */
86 - if (current_cpu_data.cputype == CPU_R4000SC ||
87 + if (!bcm4710 && (current_cpu_data.cputype == CPU_R4000SC ||
88 current_cpu_data.cputype == CPU_R4000MC ||
89 current_cpu_data.cputype == CPU_R4400SC ||
90 - current_cpu_data.cputype == CPU_R4400MC)
91 + current_cpu_data.cputype == CPU_R4400MC))
92 r4k_blast_scache();
93 }
94
95 @@ -383,12 +397,15 @@
96 unsigned long ic_lsize = current_cpu_data.icache.linesz;
97 unsigned long addr, aend;
98
99 + addr = start & ~(dc_lsize - 1);
100 + aend = (end - 1) & ~(dc_lsize - 1);
101 +
102 if (!cpu_has_ic_fills_f_dc) {
103 if (end - start > dcache_size)
104 r4k_blast_dcache();
105 else {
106 - addr = start & ~(dc_lsize - 1);
107 - aend = (end - 1) & ~(dc_lsize - 1);
108 + BCM4710_PROTECTED_FILL_TLB(addr);
109 + BCM4710_PROTECTED_FILL_TLB(aend);
110
111 while (1) {
112 /* Hit_Writeback_Inv_D */
113 @@ -403,8 +420,6 @@
114 if (end - start > icache_size)
115 r4k_blast_icache();
116 else {
117 - addr = start & ~(ic_lsize - 1);
118 - aend = (end - 1) & ~(ic_lsize - 1);
119 while (1) {
120 /* Hit_Invalidate_I */
121 protected_flush_icache_line(addr);
122 @@ -443,7 +458,8 @@
123 if (cpu_has_subset_pcaches) {
124 unsigned long addr = (unsigned long) page_address(page);
125
126 - r4k_blast_scache_page(addr);
127 + if (!bcm4710)
128 + r4k_blast_scache_page(addr);
129 ClearPageDcacheDirty(page);
130
131 return;
132 @@ -451,6 +467,7 @@
133
134 if (!cpu_has_ic_fills_f_dc) {
135 unsigned long addr = (unsigned long) page_address(page);
136 +
137 r4k_blast_dcache_page(addr);
138 ClearPageDcacheDirty(page);
139 }
140 @@ -477,7 +494,7 @@
141 /* Catch bad driver code */
142 BUG_ON(size == 0);
143
144 - if (cpu_has_subset_pcaches) {
145 + if (!bcm4710 && cpu_has_subset_pcaches) {
146 unsigned long sc_lsize = current_cpu_data.scache.linesz;
147
148 if (size >= scache_size) {
149 @@ -509,6 +526,8 @@
150 R4600_HIT_CACHEOP_WAR_IMPL;
151 a = addr & ~(dc_lsize - 1);
152 end = (addr + size - 1) & ~(dc_lsize - 1);
153 + BCM4710_FILL_TLB(a);
154 + BCM4710_FILL_TLB(end);
155 while (1) {
156 flush_dcache_line(a); /* Hit_Writeback_Inv_D */
157 if (a == end)
158 @@ -527,7 +546,7 @@
159 /* Catch bad driver code */
160 BUG_ON(size == 0);
161
162 - if (cpu_has_subset_pcaches) {
163 + if (!bcm4710 && (cpu_has_subset_pcaches)) {
164 unsigned long sc_lsize = current_cpu_data.scache.linesz;
165
166 if (size >= scache_size) {
167 @@ -554,6 +573,8 @@
168 R4600_HIT_CACHEOP_WAR_IMPL;
169 a = addr & ~(dc_lsize - 1);
170 end = (addr + size - 1) & ~(dc_lsize - 1);
171 + BCM4710_FILL_TLB(a);
172 + BCM4710_FILL_TLB(end);
173 while (1) {
174 flush_dcache_line(a); /* Hit_Writeback_Inv_D */
175 if (a == end)
176 @@ -577,6 +598,8 @@
177 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
178
179 R4600_HIT_CACHEOP_WAR_IMPL;
180 + BCM4710_PROTECTED_FILL_TLB(addr);
181 + BCM4710_PROTECTED_FILL_TLB(addr + 4);
182 protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
183 protected_flush_icache_line(addr & ~(ic_lsize - 1));
184 if (MIPS4K_ICACHE_REFILL_WAR) {
185 @@ -986,10 +1009,12 @@
186 case CPU_R4000MC:
187 case CPU_R4400SC:
188 case CPU_R4400MC:
189 - probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
190 - sc_present = probe_scache_kseg1(config);
191 - if (sc_present)
192 - c->options |= MIPS_CPU_CACHE_CDEX_S;
193 + if (!bcm4710) {
194 + probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
195 + sc_present = probe_scache_kseg1(config);
196 + if (sc_present)
197 + c->options |= MIPS_CPU_CACHE_CDEX_S;
198 + }
199 break;
200
201 case CPU_R10000:
202 @@ -1041,6 +1066,19 @@
203 static inline void coherency_setup(void)
204 {
205 change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
206 +
207 +#if defined(CONFIG_BCM4310) || defined(CONFIG_BCM4704) || defined(CONFIG_BCM5365)
208 + if (BCM330X(current_cpu_data.processor_id)) {
209 + uint32 cm;
210 +
211 + cm = read_c0_diag();
212 + /* Enable icache */
213 + cm |= (1 << 31);
214 + /* Enable dcache */
215 + cm |= (1 << 30);
216 + write_c0_diag(cm);
217 + }
218 +#endif
219
220 /*
221 * c0_status.cu=0 specifies that updates by the sc instruction use
222 @@ -1062,6 +1100,42 @@
223
224 }
225
226 +#ifdef CONFIG_BCM4704
227 +static void __init mips32_icache_fill(unsigned long addr, uint nbytes)
228 +{
229 + unsigned long ic_lsize = current_cpu_data.icache.linesz;
230 + int i;
231 + for (i = 0; i < nbytes; i += ic_lsize)
232 + fill_icache_line((addr + i));
233 +}
234 +
235 +/*
236 + * This must be run from the cache on 4704A0
237 + * so there are no mips core BIU ops in progress
238 + * when the PFC is enabled.
239 + */
240 +#define PFC_CR0 0xff400000 /* control reg 0 */
241 +#define PFC_CR1 0xff400004 /* control reg 1 */
242 +static void __init enable_pfc(u32 mode)
243 +{
244 + /* write range */
245 + *(volatile u32 *)PFC_CR1 = 0xffff0000;
246 +
247 + /* enable */
248 + *(volatile u32 *)PFC_CR0 = mode;
249 +}
250 +
251 +void check_enable_mips_pfc(int val)
252 +{
253 + /* enable prefetch cache */
254 + if (BCM330X(current_cpu_data.processor_id)
255 + && (read_c0_diag() & (1 << 29))) {
256 + mips32_icache_fill((unsigned long) &enable_pfc, 64);
257 + enable_pfc(val);
258 + }
259 +}
260 +#endif
261 +
262 void __init ld_mmu_r4xx0(void)
263 {
264 extern void build_clear_page(void);
265 @@ -1073,6 +1147,12 @@
266 memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
267 memcpy((void *)(KSEG1 + 0x100), &except_vec2_generic, 0x80);
268
269 + if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & PRID_REV_MASK) == 0) {
270 + printk("Enabling BCM4710A0 cache workarounds.\n");
271 + bcm4710 = 1;
272 + } else
273 + bcm4710 = 0;
274 +
275 probe_pcache();
276 setup_scache();
277
278 @@ -1117,47 +1197,9 @@
279
280 build_clear_page();
281 build_copy_page();
282 -}
283 -
284 -#ifdef CONFIG_BCM4704
285 -static void __init mips32_icache_fill(unsigned long addr, uint nbytes)
286 -{
287 - unsigned long ic_lsize = current_cpu_data.icache.linesz;
288 - int i;
289 - for (i = 0; i < nbytes; i += ic_lsize)
290 - fill_icache_line((addr + i));
291 -}
292 -
293 -/*
294 - * This must be run from the cache on 4704A0
295 - * so there are no mips core BIU ops in progress
296 - * when the PFC is enabled.
297 - */
298 -#define PFC_CR0 0xff400000 /* control reg 0 */
299 -#define PFC_CR1 0xff400004 /* control reg 1 */
300 -static void __init enable_pfc(u32 mode)
301 -{
302 - /* write range */
303 - *(volatile u32 *)PFC_CR1 = 0xffff0000;
304 -
305 - /* enable */
306 - *(volatile u32 *)PFC_CR0 = mode;
307 -}
308 -#endif
309 -
310 -
311 -void check_enable_mips_pfc(int val)
312 -{
313 -
314 +
315 #ifdef CONFIG_BCM4704
316 - struct cpuinfo_mips *c = &current_cpu_data;
317 -
318 - /* enable prefetch cache */
319 - if (((c->processor_id & (PRID_COMP_MASK | PRID_IMP_MASK)) == PRID_IMP_BCM3302)
320 - && (read_c0_diag() & (1 << 29))) {
321 - mips32_icache_fill((unsigned long) &enable_pfc, 64);
322 - enable_pfc(val);
323 - }
324 + check_enable_mips_pfc(0x15);
325 #endif
326 }
327
328 diff -urN linux.old/arch/mips/mm/tlb-r4k.c linux.dev/arch/mips/mm/tlb-r4k.c
329 --- linux.old/arch/mips/mm/tlb-r4k.c 2005-07-05 16:46:49.000000000 +0200
330 +++ linux.dev/arch/mips/mm/tlb-r4k.c 2005-07-05 16:42:36.000000000 +0200
331 @@ -38,6 +38,7 @@
332 old_ctx = read_c0_entryhi();
333 write_c0_entrylo0(0);
334 write_c0_entrylo1(0);
335 + BARRIER;
336
337 entry = read_c0_wired();
338
339 @@ -47,6 +48,7 @@
340 write_c0_index(entry);
341 mtc0_tlbw_hazard();
342 tlb_write_indexed();
343 + BARRIER;
344 entry++;
345 }
346 tlbw_use_hazard();
347 @@ -98,6 +100,7 @@
348 write_c0_entryhi(KSEG0 + idx*0x2000);
349 mtc0_tlbw_hazard();
350 tlb_write_indexed();
351 + BARRIER;
352 }
353 tlbw_use_hazard();
354 write_c0_entryhi(oldpid);
355 @@ -136,6 +139,7 @@
356 tlbw_use_hazard();
357
358 finish:
359 + BARRIER;
360 write_c0_entryhi(oldpid);
361 local_irq_restore(flags);
362 }
363 @@ -204,6 +208,7 @@
364 pmdp = pmd_offset(pgdp, address);
365 idx = read_c0_index();
366 ptep = pte_offset(pmdp, address);
367 + BARRIER;
368 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
369 write_c0_entrylo0(ptep->pte_high);
370 ptep++;
371 @@ -220,6 +225,7 @@
372 tlb_write_indexed();
373 tlbw_use_hazard();
374 write_c0_entryhi(pid);
375 + BARRIER;
376 local_irq_restore(flags);
377 }
378
379 @@ -317,6 +323,7 @@
380 }
381
382 write_c0_index(temp_tlb_entry);
383 + BARRIER;
384 write_c0_pagemask(pagemask);
385 write_c0_entryhi(entryhi);
386 write_c0_entrylo0(entrylo0);
387 diff -urN linux.old/arch/mips/mm/tlbex-mips32.S linux.dev/arch/mips/mm/tlbex-mips32.S
388 --- linux.old/arch/mips/mm/tlbex-mips32.S 2005-07-05 16:46:49.000000000 +0200
389 +++ linux.dev/arch/mips/mm/tlbex-mips32.S 2005-07-05 16:42:36.000000000 +0200
390 @@ -90,6 +90,9 @@
391 .set noat
392 LEAF(except_vec0_r4000)
393 .set mips3
394 +#ifdef CONFIG_BCM4704
395 + nop
396 +#endif
397 #ifdef CONFIG_SMP
398 mfc0 k1, CP0_CONTEXT
399 la k0, pgd_current
400 diff -urN linux.old/include/asm-mips/r4kcache.h linux.dev/include/asm-mips/r4kcache.h
401 --- linux.old/include/asm-mips/r4kcache.h 2005-07-05 16:46:49.000000000 +0200
402 +++ linux.dev/include/asm-mips/r4kcache.h 2005-07-05 16:42:36.000000000 +0200
403 @@ -15,6 +15,18 @@
404 #include <asm/asm.h>
405 #include <asm/cacheops.h>
406
407 +#ifdef CONFIG_BCM4710
408 +#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF)))->sbimstate)
409 +
410 +#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
411 +#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
412 +#else
413 +#define BCM4710_DUMMY_RREG()
414 +
415 +#define BCM4710_FILL_TLB(addr)
416 +#define BCM4710_PROTECTED_FILL_TLB(addr)
417 +#endif
418 +
419 #define cache_op(op,addr) \
420 __asm__ __volatile__( \
421 " .set noreorder \n" \
422 @@ -32,6 +44,7 @@
423
424 static inline void flush_dcache_line_indexed(unsigned long addr)
425 {
426 + BCM4710_DUMMY_RREG();
427 cache_op(Index_Writeback_Inv_D, addr);
428 }
429
430 @@ -47,6 +60,7 @@
431
432 static inline void flush_dcache_line(unsigned long addr)
433 {
434 + BCM4710_DUMMY_RREG();
435 cache_op(Hit_Writeback_Inv_D, addr);
436 }
437
438 @@ -91,6 +105,7 @@
439 */
440 static inline void protected_writeback_dcache_line(unsigned long addr)
441 {
442 + BCM4710_DUMMY_RREG();
443 __asm__ __volatile__(
444 ".set noreorder\n\t"
445 ".set mips3\n"
446 @@ -138,6 +153,59 @@
447 : "r" (base), \
448 "i" (op));
449
450 +#define cache_unroll(base,op) \
451 + __asm__ __volatile__(" \
452 + .set noreorder; \
453 + .set mips3; \
454 + cache %1, (%0); \
455 + .set mips0; \
456 + .set reorder" \
457 + : \
458 + : "r" (base), \
459 + "i" (op));
460 +
461 +
462 +static inline void blast_dcache(void)
463 +{
464 + unsigned long start = KSEG0;
465 + unsigned long end = start + current_cpu_data.dcache.waysize;
466 +
467 + while(start < end) {
468 + BCM4710_DUMMY_RREG();
469 + cache_unroll(start,Index_Writeback_Inv_D);
470 + start += current_cpu_data.dcache.linesz;
471 + }
472 +}
473 +
474 +static inline void blast_dcache_page(unsigned long page)
475 +{
476 + unsigned long start = page;
477 + unsigned long end = start + PAGE_SIZE;
478 +
479 + BCM4710_FILL_TLB(start);
480 + do {
481 + BCM4710_DUMMY_RREG();
482 + cache_unroll(start,Hit_Writeback_Inv_D);
483 + start += current_cpu_data.dcache.linesz;
484 + } while (start < end);
485 +}
486 +
487 +static inline void blast_dcache_page_indexed(unsigned long page)
488 +{
489 + unsigned long start = page;
490 + unsigned long end = start + PAGE_SIZE;
491 + unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
492 + unsigned long ws_end = current_cpu_data.dcache.ways <<
493 + current_cpu_data.dcache.waybit;
494 + unsigned long ws, addr;
495 +
496 + for (ws = 0; ws < ws_end; ws += ws_inc)
 497 +	for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
498 + BCM4710_DUMMY_RREG();
499 + cache_unroll(addr,Index_Writeback_Inv_D);
500 + }
501 +}
502 +
503 static inline void blast_dcache16(void)
504 {
505 unsigned long start = KSEG0;
506 @@ -148,8 +216,9 @@
507 unsigned long ws, addr;
508
509 for (ws = 0; ws < ws_end; ws += ws_inc)
510 - for (addr = start; addr < end; addr += 0x200)
511 + for (addr = start; addr < end; addr += 0x200) {
512 cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
513 + }
514 }
515
516 static inline void blast_dcache16_page(unsigned long page)
517 @@ -173,8 +242,9 @@
518 unsigned long ws, addr;
519
520 for (ws = 0; ws < ws_end; ws += ws_inc)
521 - for (addr = start; addr < end; addr += 0x200)
522 + for (addr = start; addr < end; addr += 0x200) {
523 cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
524 + }
525 }
526
527 static inline void blast_icache16(void)
528 @@ -196,6 +266,7 @@
529 unsigned long start = page;
530 unsigned long end = start + PAGE_SIZE;
531
532 + BCM4710_FILL_TLB(start);
533 do {
534 cache16_unroll32(start,Hit_Invalidate_I);
535 start += 0x200;
536 @@ -281,6 +352,7 @@
537 : "r" (base), \
538 "i" (op));
539
540 +
541 static inline void blast_dcache32(void)
542 {
543 unsigned long start = KSEG0;
544 @@ -291,8 +363,9 @@
545 unsigned long ws, addr;
546
547 for (ws = 0; ws < ws_end; ws += ws_inc)
548 - for (addr = start; addr < end; addr += 0x400)
549 + for (addr = start; addr < end; addr += 0x400) {
550 cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
551 + }
552 }
553
554 static inline void blast_dcache32_page(unsigned long page)
555 @@ -316,8 +389,9 @@
556 unsigned long ws, addr;
557
558 for (ws = 0; ws < ws_end; ws += ws_inc)
559 - for (addr = start; addr < end; addr += 0x400)
560 + for (addr = start; addr < end; addr += 0x400) {
561 cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
562 + }
563 }
564
565 static inline void blast_icache32(void)
566 @@ -339,6 +413,7 @@
567 unsigned long start = page;
568 unsigned long end = start + PAGE_SIZE;
569
570 + BCM4710_FILL_TLB(start);
571 do {
572 cache32_unroll32(start,Hit_Invalidate_I);
573 start += 0x400;
574 @@ -443,6 +518,7 @@
575 unsigned long start = page;
576 unsigned long end = start + PAGE_SIZE;
577
578 + BCM4710_FILL_TLB(start);
579 do {
580 cache64_unroll32(start,Hit_Invalidate_I);
581 start += 0x800;
582 diff -urN linux.old/include/asm-mips/stackframe.h linux.dev/include/asm-mips/stackframe.h
583 --- linux.old/include/asm-mips/stackframe.h 2005-07-05 16:46:49.000000000 +0200
584 +++ linux.dev/include/asm-mips/stackframe.h 2005-07-05 16:42:36.000000000 +0200
585 @@ -172,6 +172,46 @@
586 rfe; \
587 .set pop
588
589 +#elif defined(CONFIG_BCM4710) || defined(CONFIG_BCM4704)
590 +
591 +#define RESTORE_SOME \
592 + .set push; \
593 + .set reorder; \
594 + mfc0 t0, CP0_STATUS; \
595 + .set pop; \
596 + ori t0, 0x1f; \
597 + xori t0, 0x1f; \
598 + mtc0 t0, CP0_STATUS; \
599 + li v1, 0xff00; \
600 + and t0, v1; \
601 + lw v0, PT_STATUS(sp); \
602 + nor v1, $0, v1; \
603 + and v0, v1; \
604 + or v0, t0; \
605 + ori v1, v0, ST0_IE; \
606 + xori v1, v1, ST0_IE; \
607 + mtc0 v1, CP0_STATUS; \
608 + mtc0 v0, CP0_STATUS; \
609 + lw v1, PT_EPC(sp); \
610 + mtc0 v1, CP0_EPC; \
611 + lw $31, PT_R31(sp); \
612 + lw $28, PT_R28(sp); \
613 + lw $25, PT_R25(sp); \
614 + lw $7, PT_R7(sp); \
615 + lw $6, PT_R6(sp); \
616 + lw $5, PT_R5(sp); \
617 + lw $4, PT_R4(sp); \
618 + lw $3, PT_R3(sp); \
619 + lw $2, PT_R2(sp)
620 +
621 +#define RESTORE_SP_AND_RET \
622 + lw sp, PT_R29(sp); \
623 + nop; \
624 + nop; \
625 + .set mips3; \
626 + eret; \
627 + .set mips0
628 +
629 #else
630
631 #define RESTORE_SOME \