add initial support for the cris architecture used on foxboards to openwrt
[openwrt/svn-archive/archive.git] / target / linux / etrax-2.6 / patches / cris / 006-gcc-4.patch
1 diff -urN linux-2.6.19.2.orig/arch/cris/arch-v10/lib/memset.c linux-2.6.19.2/arch/cris/arch-v10/lib/memset.c
2 --- linux-2.6.19.2.orig/arch/cris/arch-v10/lib/memset.c 2007-05-20 01:46:35.000000000 +0200
3 +++ linux-2.6.19.2/arch/cris/arch-v10/lib/memset.c 2007-05-20 01:51:47.000000000 +0200
4 @@ -29,224 +29,21 @@
5
6 #include <linux/types.h>
7
8 -/* No, there's no macro saying 12*4, since it is "hard" to get it into
9 - the asm in a good way. Thus better to expose the problem everywhere.
10 - */
11
12 -/* Assuming 1 cycle per dword written or read (ok, not really true), and
13 - one per instruction, then 43+3*(n/48-1) <= 24+24*(n/48-1)
14 - so n >= 45.7; n >= 0.9; we win on the first full 48-byte block to set. */
15 -
16 -#define ZERO_BLOCK_SIZE (1*12*4)
17 -
18 -void *memset(void *pdst,
19 - int c,
20 - size_t plen)
21 +/**
22 + * memset - Fill a region of memory with the given value
23 + * @s: Pointer to the start of the area.
24 + * @c: The byte to fill the area with
25 + * @count: The size of the area.
26 + *
27 + * Do not use memset() to access IO space, use memset_io() instead.
28 + */
29 +void *memset(void *s, int c, size_t count)
30 {
31 - /* Ok. Now we want the parameters put in special registers.
32 - Make sure the compiler is able to make something useful of this. */
33 -
34 - register char *return_dst __asm__ ("r10") = pdst;
35 - register int n __asm__ ("r12") = plen;
36 - register int lc __asm__ ("r11") = c;
37 -
38 - /* Most apps use memset sanely. Only those memsetting about 3..4
39 - bytes or less get penalized compared to the generic implementation
40 - - and that's not really sane use. */
41 -
42 - /* Ugh. This is fragile at best. Check with newer GCC releases, if
43 - they compile cascaded "x |= x << 8" sanely! */
44 - __asm__("movu.b %0,$r13\n\t"
45 - "lslq 8,$r13\n\t"
46 - "move.b %0,$r13\n\t"
47 - "move.d $r13,%0\n\t"
48 - "lslq 16,$r13\n\t"
49 - "or.d $r13,%0"
50 - : "=r" (lc) : "0" (lc) : "r13");
51 -
52 - {
53 - register char *dst __asm__ ("r13") = pdst;
54 -
55 - /* This is NONPORTABLE, but since this whole routine is */
56 - /* grossly nonportable that doesn't matter. */
57 -
58 - if (((unsigned long) pdst & 3) != 0
59 - /* Oops! n=0 must be a legal call, regardless of alignment. */
60 - && n >= 3)
61 - {
62 - if ((unsigned long)dst & 1)
63 - {
64 - *dst = (char) lc;
65 - n--;
66 - dst++;
67 - }
68 -
69 - if ((unsigned long)dst & 2)
70 - {
71 - *(short *)dst = lc;
72 - n -= 2;
73 - dst += 2;
74 - }
75 - }
76 -
77 - /* Now the fun part. For the threshold value of this, check the equation
78 - above. */
79 - /* Decide which copying method to use. */
80 - if (n >= ZERO_BLOCK_SIZE)
81 - {
82 - /* For large copies we use 'movem' */
83 -
84 - /* It is not optimal to tell the compiler about clobbering any
85 - registers; that will move the saving/restoring of those registers
86 - to the function prologue/epilogue, and make non-movem sizes
87 - suboptimal.
88 -
89 - This method is not foolproof; it assumes that the "asm reg"
90 - declarations at the beginning of the function really are used
91 - here (beware: they may be moved to temporary registers).
92 - This way, we do not have to save/move the registers around into
93 - temporaries; we can safely use them straight away.
94 -
95 - If you want to check that the allocation was right; then
96 - check the equalities in the first comment. It should say
97 - "r13=r13, r12=r12, r11=r11" */
98 - __asm__ volatile ("
99 - ;; Check that the following is true (same register names on
100 - ;; both sides of equal sign, as in r8=r8):
101 - ;; %0=r13, %1=r12, %4=r11
102 - ;;
103 - ;; Save the registers we'll clobber in the movem process
104 - ;; on the stack. Don't mention them to gcc, it will only be
105 - ;; upset.
106 - subq 11*4,$sp
107 - movem $r10,[$sp]
108 -
109 - move.d $r11,$r0
110 - move.d $r11,$r1
111 - move.d $r11,$r2
112 - move.d $r11,$r3
113 - move.d $r11,$r4
114 - move.d $r11,$r5
115 - move.d $r11,$r6
116 - move.d $r11,$r7
117 - move.d $r11,$r8
118 - move.d $r11,$r9
119 - move.d $r11,$r10
120 -
121 - ;; Now we've got this:
122 - ;; r13 - dst
123 - ;; r12 - n
124 -
125 - ;; Update n for the first loop
126 - subq 12*4,$r12
127 -0:
128 - subq 12*4,$r12
129 - bge 0b
130 - movem $r11,[$r13+]
131 -
132 - addq 12*4,$r12 ;; compensate for last loop underflowing n
133 -
134 - ;; Restore registers from stack
135 - movem [$sp+],$r10"
136 -
137 - /* Outputs */ : "=r" (dst), "=r" (n)
138 - /* Inputs */ : "0" (dst), "1" (n), "r" (lc));
139 -
140 - }
141 -
142 - /* Either we directly starts copying, using dword copying
143 - in a loop, or we copy as much as possible with 'movem'
144 - and then the last block (<44 bytes) is copied here.
145 - This will work since 'movem' will have updated src,dst,n. */
146 -
147 - while ( n >= 16 )
148 - {
149 - *((long*)dst)++ = lc;
150 - *((long*)dst)++ = lc;
151 - *((long*)dst)++ = lc;
152 - *((long*)dst)++ = lc;
153 - n -= 16;
154 - }
155 + char *xs = s;
156
157 - /* A switch() is definitely the fastest although it takes a LOT of code.
158 - * Particularly if you inline code this.
159 - */
160 - switch (n)
161 - {
162 - case 0:
163 - break;
164 - case 1:
165 - *(char*)dst = (char) lc;
166 - break;
167 - case 2:
168 - *(short*)dst = (short) lc;
169 - break;
170 - case 3:
171 - *((short*)dst)++ = (short) lc;
172 - *(char*)dst = (char) lc;
173 - break;
174 - case 4:
175 - *((long*)dst)++ = lc;
176 - break;
177 - case 5:
178 - *((long*)dst)++ = lc;
179 - *(char*)dst = (char) lc;
180 - break;
181 - case 6:
182 - *((long*)dst)++ = lc;
183 - *(short*)dst = (short) lc;
184 - break;
185 - case 7:
186 - *((long*)dst)++ = lc;
187 - *((short*)dst)++ = (short) lc;
188 - *(char*)dst = (char) lc;
189 - break;
190 - case 8:
191 - *((long*)dst)++ = lc;
192 - *((long*)dst)++ = lc;
193 - break;
194 - case 9:
195 - *((long*)dst)++ = lc;
196 - *((long*)dst)++ = lc;
197 - *(char*)dst = (char) lc;
198 - break;
199 - case 10:
200 - *((long*)dst)++ = lc;
201 - *((long*)dst)++ = lc;
202 - *(short*)dst = (short) lc;
203 - break;
204 - case 11:
205 - *((long*)dst)++ = lc;
206 - *((long*)dst)++ = lc;
207 - *((short*)dst)++ = (short) lc;
208 - *(char*)dst = (char) lc;
209 - break;
210 - case 12:
211 - *((long*)dst)++ = lc;
212 - *((long*)dst)++ = lc;
213 - *((long*)dst)++ = lc;
214 - break;
215 - case 13:
216 - *((long*)dst)++ = lc;
217 - *((long*)dst)++ = lc;
218 - *((long*)dst)++ = lc;
219 - *(char*)dst = (char) lc;
220 - break;
221 - case 14:
222 - *((long*)dst)++ = lc;
223 - *((long*)dst)++ = lc;
224 - *((long*)dst)++ = lc;
225 - *(short*)dst = (short) lc;
226 - break;
227 - case 15:
228 - *((long*)dst)++ = lc;
229 - *((long*)dst)++ = lc;
230 - *((long*)dst)++ = lc;
231 - *((short*)dst)++ = (short) lc;
232 - *(char*)dst = (char) lc;
233 - break;
234 - }
235 - }
236 + while (count--)
237 + *xs++ = c;
238 + return s;
239 +}
240
241 - return return_dst; /* destination pointer. */
242 -} /* memset() */
243 diff -urN linux-2.6.19.2.orig/arch/cris/arch-v10/lib/string.c linux-2.6.19.2/arch/cris/arch-v10/lib/string.c
244 --- linux-2.6.19.2.orig/arch/cris/arch-v10/lib/string.c 2007-05-20 01:46:35.000000000 +0200
245 +++ linux-2.6.19.2/arch/cris/arch-v10/lib/string.c 2007-05-20 01:51:19.000000000 +0200
246 @@ -33,193 +33,21 @@
247
248 #include <linux/types.h>
249
250 -void *memcpy(void *pdst,
251 - const void *psrc,
252 - size_t pn)
253 + /**
254 + * memcpy - Copy one area of memory to another
255 + * @dest: Where to copy to
256 + * @src: Where to copy from
257 + * @count: The size of the area.
258 + *
259 + * You should not use this function to access IO space, use memcpy_toio()
260 + * or memcpy_fromio() instead.
261 + */
262 +void *memcpy(void *dest, const void *src, size_t count)
263 {
264 - /* Ok. Now we want the parameters put in special registers.
265 - Make sure the compiler is able to make something useful of this.
266 - As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).
267 + char *tmp = dest;
268 + const char *s = src;
269
270 - If gcc was allright, it really would need no temporaries, and no
271 - stack space to save stuff on. */
272 -
273 - register void *return_dst __asm__ ("r10") = pdst;
274 - register char *dst __asm__ ("r13") = pdst;
275 - register const char *src __asm__ ("r11") = psrc;
276 - register int n __asm__ ("r12") = pn;
277 -
278 -
279 - /* When src is aligned but not dst, this makes a few extra needless
280 - cycles. I believe it would take as many to check that the
281 - re-alignment was unnecessary. */
282 - if (((unsigned long) dst & 3) != 0
283 - /* Don't align if we wouldn't copy more than a few bytes; so we
284 - don't have to check further for overflows. */
285 - && n >= 3)
286 - {
287 - if ((unsigned long) dst & 1)
288 - {
289 - n--;
290 - *(char*)dst = *(char*)src;
291 - src++;
292 - dst++;
293 - }
294 -
295 - if ((unsigned long) dst & 2)
296 - {
297 - n -= 2;
298 - *(short*)dst = *(short*)src;
299 - src += 2;
300 - dst += 2;
301 - }
302 - }
303 -
304 - /* Decide which copying method to use. */
305 - if (n >= 44*2) /* Break even between movem and
306 - move16 is at 38.7*2, but modulo 44. */
307 - {
308 - /* For large copies we use 'movem' */
309 -
310 - /* It is not optimal to tell the compiler about clobbering any
311 - registers; that will move the saving/restoring of those registers
312 - to the function prologue/epilogue, and make non-movem sizes
313 - suboptimal.
314 -
315 - This method is not foolproof; it assumes that the "asm reg"
316 - declarations at the beginning of the function really are used
317 - here (beware: they may be moved to temporary registers).
318 - This way, we do not have to save/move the registers around into
319 - temporaries; we can safely use them straight away.
320 -
321 - If you want to check that the allocation was right; then
322 - check the equalities in the first comment. It should say
323 - "r13=r13, r11=r11, r12=r12" */
324 - __asm__ volatile ("
325 - ;; Check that the following is true (same register names on
326 - ;; both sides of equal sign, as in r8=r8):
327 - ;; %0=r13, %1=r11, %2=r12
328 - ;;
329 - ;; Save the registers we'll use in the movem process
330 - ;; on the stack.
331 - subq 11*4,$sp
332 - movem $r10,[$sp]
333 -
334 - ;; Now we've got this:
335 - ;; r11 - src
336 - ;; r13 - dst
337 - ;; r12 - n
338 -
339 - ;; Update n for the first loop
340 - subq 44,$r12
341 -0:
342 - movem [$r11+],$r10
343 - subq 44,$r12
344 - bge 0b
345 - movem $r10,[$r13+]
346 -
347 - addq 44,$r12 ;; compensate for last loop underflowing n
348 -
349 - ;; Restore registers from stack
350 - movem [$sp+],$r10"
351 -
352 - /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n)
353 - /* Inputs */ : "0" (dst), "1" (src), "2" (n));
354 -
355 - }
356 -
357 - /* Either we directly starts copying, using dword copying
358 - in a loop, or we copy as much as possible with 'movem'
359 - and then the last block (<44 bytes) is copied here.
360 - This will work since 'movem' will have updated src,dst,n. */
361 -
362 - while ( n >= 16 )
363 - {
364 - *((long*)dst)++ = *((long*)src)++;
365 - *((long*)dst)++ = *((long*)src)++;
366 - *((long*)dst)++ = *((long*)src)++;
367 - *((long*)dst)++ = *((long*)src)++;
368 - n -= 16;
369 - }
370 -
371 - /* A switch() is definitely the fastest although it takes a LOT of code.
372 - * Particularly if you inline code this.
373 - */
374 - switch (n)
375 - {
376 - case 0:
377 - break;
378 - case 1:
379 - *(char*)dst = *(char*)src;
380 - break;
381 - case 2:
382 - *(short*)dst = *(short*)src;
383 - break;
384 - case 3:
385 - *((short*)dst)++ = *((short*)src)++;
386 - *(char*)dst = *(char*)src;
387 - break;
388 - case 4:
389 - *((long*)dst)++ = *((long*)src)++;
390 - break;
391 - case 5:
392 - *((long*)dst)++ = *((long*)src)++;
393 - *(char*)dst = *(char*)src;
394 - break;
395 - case 6:
396 - *((long*)dst)++ = *((long*)src)++;
397 - *(short*)dst = *(short*)src;
398 - break;
399 - case 7:
400 - *((long*)dst)++ = *((long*)src)++;
401 - *((short*)dst)++ = *((short*)src)++;
402 - *(char*)dst = *(char*)src;
403 - break;
404 - case 8:
405 - *((long*)dst)++ = *((long*)src)++;
406 - *((long*)dst)++ = *((long*)src)++;
407 - break;
408 - case 9:
409 - *((long*)dst)++ = *((long*)src)++;
410 - *((long*)dst)++ = *((long*)src)++;
411 - *(char*)dst = *(char*)src;
412 - break;
413 - case 10:
414 - *((long*)dst)++ = *((long*)src)++;
415 - *((long*)dst)++ = *((long*)src)++;
416 - *(short*)dst = *(short*)src;
417 - break;
418 - case 11:
419 - *((long*)dst)++ = *((long*)src)++;
420 - *((long*)dst)++ = *((long*)src)++;
421 - *((short*)dst)++ = *((short*)src)++;
422 - *(char*)dst = *(char*)src;
423 - break;
424 - case 12:
425 - *((long*)dst)++ = *((long*)src)++;
426 - *((long*)dst)++ = *((long*)src)++;
427 - *((long*)dst)++ = *((long*)src)++;
428 - break;
429 - case 13:
430 - *((long*)dst)++ = *((long*)src)++;
431 - *((long*)dst)++ = *((long*)src)++;
432 - *((long*)dst)++ = *((long*)src)++;
433 - *(char*)dst = *(char*)src;
434 - break;
435 - case 14:
436 - *((long*)dst)++ = *((long*)src)++;
437 - *((long*)dst)++ = *((long*)src)++;
438 - *((long*)dst)++ = *((long*)src)++;
439 - *(short*)dst = *(short*)src;
440 - break;
441 - case 15:
442 - *((long*)dst)++ = *((long*)src)++;
443 - *((long*)dst)++ = *((long*)src)++;
444 - *((long*)dst)++ = *((long*)src)++;
445 - *((short*)dst)++ = *((short*)src)++;
446 - *(char*)dst = *(char*)src;
447 - break;
448 - }
449 -
450 - return return_dst; /* destination pointer. */
451 -} /* memcpy() */
452 + while (count--)
453 + *tmp++ = *s++;
454 + return dest;
455 +}
456 diff -urN linux-2.6.19.2.orig/arch/cris/arch-v10/lib/usercopy.c linux-2.6.19.2/arch/cris/arch-v10/lib/usercopy.c
457 --- linux-2.6.19.2.orig/arch/cris/arch-v10/lib/usercopy.c 2007-05-16 22:11:26.000000000 +0200
458 +++ linux-2.6.19.2/arch/cris/arch-v10/lib/usercopy.c 2007-05-16 23:17:41.000000000 +0200
459 @@ -88,63 +88,38 @@
460 If you want to check that the allocation was right; then
461 check the equalities in the first comment. It should say
462 "r13=r13, r11=r11, r12=r12". */
463 - __asm__ volatile ("\
464 - .ifnc %0%1%2%3,$r13$r11$r12$r10 \n\
465 - .err \n\
466 - .endif \n\
467 -
468 - ;; Save the registers we'll use in the movem process
469 - ;; on the stack.
470 - subq 11*4,$sp
471 - movem $r10,[$sp]
472 -
473 - ;; Now we've got this:
474 - ;; r11 - src
475 - ;; r13 - dst
476 - ;; r12 - n
477 -
478 - ;; Update n for the first loop
479 - subq 44,$r12
480 -
481 -; Since the noted PC of a faulting instruction in a delay-slot of a taken
482 -; branch, is that of the branch target, we actually point at the from-movem
483 -; for this case. There is no ambiguity here; if there was a fault in that
484 -; instruction (meaning a kernel oops), the faulted PC would be the address
485 -; after *that* movem.
486 -
487 -0:
488 - movem [$r11+],$r10
489 - subq 44,$r12
490 - bge 0b
491 - movem $r10,[$r13+]
492 -1:
493 - addq 44,$r12 ;; compensate for last loop underflowing n
494 -
495 - ;; Restore registers from stack
496 - movem [$sp+],$r10
497 -2:
498 - .section .fixup,\"ax\"
499 -
500 -; To provide a correct count in r10 of bytes that failed to be copied,
501 -; we jump back into the loop if the loop-branch was taken. There is no
502 -; performance penalty for sany use; the program will segfault soon enough.
503 -
504 -3:
505 - move.d [$sp],$r10
506 - addq 44,$r10
507 - move.d $r10,[$sp]
508 - jump 0b
509 -4:
510 - movem [$sp+],$r10
511 - addq 44,$r10
512 - addq 44,$r12
513 - jump 2b
514 -
515 - .previous
516 - .section __ex_table,\"a\"
517 - .dword 0b,3b
518 - .dword 1b,4b
519 - .previous"
520 + __asm__ volatile (
521 + ".ifnc %0%1%2%3,$r13$r11$r12$r10 \n\t"
522 + ".err \n\t"
523 + ".endif \n\t"
524 + "subq 11*4,$sp\n\t"
525 + "movem $r10,[$sp]\n\t"
526 + "subq 44,$r12\n\t"
527 + "0:\n\t"
528 + "movem [$r11+],$r10\n\t"
529 + "subq 44,$r12\n\t"
530 + "bge 0b\n\t"
531 + "movem $r10,[$r13+]\n\t"
532 + "1:\n\t"
533 + "addq 44,$r12 \n\t"
534 + "movem [$sp+],$r10\n\t"
535 + "2:\n\t"
536 + ".section .fixup,\"ax\"\n\t"
537 + "3:\n\t"
538 + "move.d [$sp],$r10\n\t"
539 + "addq 44,$r10\n\t"
540 + "move.d $r10,[$sp]\n\t"
541 + "jump 0b\n\t"
542 + "4:\n\t"
543 + "movem [$sp+],$r10\n\t"
544 + "addq 44,$r10\n\t"
545 + "addq 44,$r12\n\t"
546 + "jump 2b\n\t"
547 + ".previous\n\t"
548 + ".section __ex_table,\"a\"\n\t"
549 + ".dword 0b,3b\n\t"
550 + ".dword 1b,4b\n\t"
551 + ".previous\n\t"
552
553 /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
554 /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
555 @@ -253,60 +228,32 @@
556 If you want to check that the allocation was right; then
557 check the equalities in the first comment. It should say
558 "r13=r13, r11=r11, r12=r12" */
559 - __asm__ volatile ("
560 - .ifnc %0%1%2%3,$r13$r11$r12$r10 \n\
561 - .err \n\
562 - .endif \n\
563 -
564 - ;; Save the registers we'll use in the movem process
565 - ;; on the stack.
566 - subq 11*4,$sp
567 - movem $r10,[$sp]
568 -
569 - ;; Now we've got this:
570 - ;; r11 - src
571 - ;; r13 - dst
572 - ;; r12 - n
573 -
574 - ;; Update n for the first loop
575 - subq 44,$r12
576 -0:
577 - movem [$r11+],$r10
578 -1:
579 - subq 44,$r12
580 - bge 0b
581 - movem $r10,[$r13+]
582 -
583 - addq 44,$r12 ;; compensate for last loop underflowing n
584 -
585 - ;; Restore registers from stack
586 - movem [$sp+],$r10
587 -4:
588 - .section .fixup,\"ax\"
589 -
590 -;; Do not jump back into the loop if we fail. For some uses, we get a
591 -;; page fault somewhere on the line. Without checking for page limits,
592 -;; we don't know where, but we need to copy accurately and keep an
593 -;; accurate count; not just clear the whole line. To do that, we fall
594 -;; down in the code below, proceeding with smaller amounts. It should
595 -;; be kept in mind that we have to cater to code like what at one time
596 -;; was in fs/super.c:
597 -;; i = size - copy_from_user((void *)page, data, size);
598 -;; which would cause repeated faults while clearing the remainder of
599 -;; the SIZE bytes at PAGE after the first fault.
600 -;; A caveat here is that we must not fall through from a failing page
601 -;; to a valid page.
602 -
603 -3:
604 - movem [$sp+],$r10
605 - addq 44,$r12 ;; Get back count before faulting point.
606 - subq 44,$r11 ;; Get back pointer to faulting movem-line.
607 - jump 4b ;; Fall through, pretending the fault didn't happen.
608 -
609 - .previous
610 - .section __ex_table,\"a\"
611 - .dword 1b,3b
612 - .previous"
613 + __asm__ volatile (
614 + ".ifnc %0%1%2%3,$r13$r11$r12$r10 \n\t"
615 + ".err \n\t"
616 + ".endif \n\t"
617 + "subq 11*4,$sp\n\t"
618 + "movem $r10,[$sp]\n\t"
619 + "subq 44,$r12\n\t"
620 + "0:\n\t"
621 + "movem [$r11+],$r10\n\t"
622 + "1:\n\t"
623 + "subq 44,$r12\n\t"
624 + "bge 0b\n\t"
625 + "movem $r10,[$r13+]\n\t"
626 + "addq 44,$r12 \n\t"
627 + "movem [$sp+],$r10\n\t"
628 + "4:\n\t"
629 + ".section .fixup,\"ax\"\n\t"
630 + "3:\n\t"
631 + "movem [$sp+],$r10\n\t"
632 + "addq 44,$r12\n\t"
633 + "subq 44,$r11\n\t"
634 + "jump 4b \n\t"
635 + ".previous\n\t"
636 + ".section __ex_table,\"a\"\n\t"
637 + ".dword 1b,3b\n\t"
638 + ".previous\n\t"
639
640 /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
641 /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
642 @@ -425,66 +372,50 @@
643 If you want to check that the allocation was right; then
644 check the equalities in the first comment. It should say
645 something like "r13=r13, r11=r11, r12=r12". */
646 - __asm__ volatile ("
647 - .ifnc %0%1%2,$r13$r12$r10 \n\
648 - .err \n\
649 - .endif \n\
650 -
651 - ;; Save the registers we'll clobber in the movem process
652 - ;; on the stack. Don't mention them to gcc, it will only be
653 - ;; upset.
654 - subq 11*4,$sp
655 - movem $r10,[$sp]
656 -
657 - clear.d $r0
658 - clear.d $r1
659 - clear.d $r2
660 - clear.d $r3
661 - clear.d $r4
662 - clear.d $r5
663 - clear.d $r6
664 - clear.d $r7
665 - clear.d $r8
666 - clear.d $r9
667 - clear.d $r10
668 - clear.d $r11
669 -
670 - ;; Now we've got this:
671 - ;; r13 - dst
672 - ;; r12 - n
673 -
674 - ;; Update n for the first loop
675 - subq 12*4,$r12
676 -0:
677 - subq 12*4,$r12
678 - bge 0b
679 - movem $r11,[$r13+]
680 -1:
681 - addq 12*4,$r12 ;; compensate for last loop underflowing n
682 -
683 - ;; Restore registers from stack
684 - movem [$sp+],$r10
685 -2:
686 - .section .fixup,\"ax\"
687 -3:
688 - move.d [$sp],$r10
689 - addq 12*4,$r10
690 - move.d $r10,[$sp]
691 - clear.d $r10
692 - jump 0b
693 -
694 -4:
695 - movem [$sp+],$r10
696 - addq 12*4,$r10
697 - addq 12*4,$r12
698 - jump 2b
699 -
700 - .previous
701 - .section __ex_table,\"a\"
702 - .dword 0b,3b
703 - .dword 1b,4b
704 - .previous"
705 -
706 + __asm__ volatile (
707 + ".ifnc %0%1%2,$r13$r12$r10\n\t"
708 + ".err \n\t"
709 + ".endif\n\t"
710 + "subq 11*4,$sp\n\t"
711 + "movem $r10,[$sp]\n\t"
712 + "clear.d $r0\n\t"
713 + "clear.d $r1\n\t"
714 + "clear.d $r2\n\t"
715 + "clear.d $r3\n\t"
716 + "clear.d $r4\n\t"
717 + "clear.d $r5\n\t"
718 + "clear.d $r6\n\t"
719 + "clear.d $r7\n\t"
720 + "clear.d $r8\n\t"
721 + "clear.d $r9\n\t"
722 + "clear.d $r10\n\t"
723 + "clear.d $r11\n\t"
724 + "subq 12*4,$r12\n\t"
725 + "0:\n\t"
726 + "subq 12*4,$r12\n\t"
727 + "bge 0b\n\t"
728 + "movem $r11,[$r13+]\n\t"
729 + "1: \n\t"
730 + "addq 12*4,$r12 \n\t"
731 + "movem [$sp+],$r10\n\t"
732 + "2:\n\t"
733 + ".section .fixup,\"ax\"\n\t"
734 + "3:\n\t"
735 + "move.d [$sp],$r10\n\t"
736 + "addq 12*4,$r10\n\t"
737 + "move.d $r10,[$sp]\n\t"
738 + "clear.d $r10\n\t"
739 + "jump 0b\n\t"
740 + "4:\n\t"
741 + "movem [$sp+],$r10\n\t"
742 + "addq 12*4,$r10\n\t"
743 + "addq 12*4,$r12\n\t"
744 + "jump 2b\n\t"
745 + ".previous\n\t"
746 + ".section __ex_table,\"a\"\n\t"
747 + ".dword 0b,3b\n\t"
748 + ".dword 1b,4b\n\t"
749 + ".previous\n\t"
750 /* Outputs */ : "=r" (dst), "=r" (n), "=r" (retn)
751 /* Inputs */ : "0" (dst), "1" (n), "2" (retn)
752 /* Clobber */ : "r11");