brcm2708: update linux 4.4 patches to latest version
[openwrt/openwrt.git] target/linux/brcm2708/patches-4.4/0079-Improve-__copy_to_user-and-__copy_from_user-performa.patch
From 93aae11e6e1bcd5178685f85938e7c339d91a95e Mon Sep 17 00:00:00 2001
From: Phil Elwell <phil@raspberrypi.org>
Date: Mon, 13 Oct 2014 11:47:53 +0100
Subject: [PATCH 079/304] Improve __copy_to_user and __copy_from_user
 performance

Provide a __copy_from_user that uses memcpy. On BCM2708, use
optimised memcpy/memmove/memcmp/memset implementations.
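
In outline, copies below a size threshold keep using the original
assembler routines, while larger copies take a page-pinning memcpy
path. A sketch of the dispatch added in uaccess_with_memcpy.c below
(the thresholds are injected per-platform from the Makefile and
default to 64 bytes elsewhere):

  unsigned long __must_check
  arm_copy_from_user(void *to, const void __user *from, unsigned long n)
  {
          if (n < COPY_FROM_USER_THRESHOLD)       /* 1600 on BCM2708 */
                  return __copy_from_user_std(to, from, n);
          return __copy_from_user_memcpy(to, from, n);
  }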

arch/arm: Add mmiocpy/set aliases for memcpy/set

See: https://github.com/raspberrypi/linux/issues/1082
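
For reference, a user-space probe of the crossover (illustrative only,
not part of this patch): a write()+read() round trip on a pipe copies
the buffer into the kernel (copy_from_user) and back out again
(copy_to_user), so timing it for sizes bracketing the 672/1600-byte
thresholds gives a rough view of where the memcpy path starts to win.
The sizes and iteration count below are arbitrary choices; build with
e.g. "cc -O2 bench.c" (older glibc may also need -lrt):

  #include <stdio.h>
  #include <time.h>
  #include <unistd.h>

  int main(void)
  {
          static const size_t sizes[] = { 64, 512, 672, 1024, 1600, 4096 };
          static char buf[4096];
          struct timespec t0, t1;
          int fds[2];

          if (pipe(fds) < 0)
                  return 1;
          for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                  clock_gettime(CLOCK_MONOTONIC, &t0);
                  for (int j = 0; j < 100000; j++) {
                          /* kernel copies buf in on write(), out on read() */
                          if (write(fds[1], buf, sizes[i]) != (ssize_t)sizes[i])
                                  return 1;
                          if (read(fds[0], buf, sizes[i]) != (ssize_t)sizes[i])
                                  return 1;
                  }
                  clock_gettime(CLOCK_MONOTONIC, &t1);
                  double s = (t1.tv_sec - t0.tv_sec) +
                             (t1.tv_nsec - t0.tv_nsec) / 1e9;
                  printf("%4zu bytes: %6.1f MB/s each way\n",
                         sizes[i], sizes[i] * 100000.0 / s / 1e6);
          }
          return 0;
  }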
---
 arch/arm/include/asm/string.h      |   5 +
 arch/arm/include/asm/uaccess.h     |   3 +
 arch/arm/lib/Makefile              |  15 +-
 arch/arm/lib/arm-mem.h             | 159 ++++++++++++
 arch/arm/lib/copy_from_user.S      |   4 +-
 arch/arm/lib/exports_rpi.c         |  37 +++
 arch/arm/lib/memcmp_rpi.S          | 285 +++++++++++++++++++++
 arch/arm/lib/memcpy_rpi.S          |  61 +++++
 arch/arm/lib/memcpymove.h          | 506 +++++++++++++++++++++++++++++++++++++
 arch/arm/lib/memmove_rpi.S         |  61 +++++
 arch/arm/lib/memset_rpi.S          | 123 +++++++++
 arch/arm/lib/uaccess_with_memcpy.c | 112 +++++++-
 12 files changed, 1365 insertions(+), 6 deletions(-)
 create mode 100644 arch/arm/lib/arm-mem.h
 create mode 100644 arch/arm/lib/exports_rpi.c
 create mode 100644 arch/arm/lib/memcmp_rpi.S
 create mode 100644 arch/arm/lib/memcpy_rpi.S
 create mode 100644 arch/arm/lib/memcpymove.h
 create mode 100644 arch/arm/lib/memmove_rpi.S
 create mode 100644 arch/arm/lib/memset_rpi.S

--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -24,6 +24,11 @@ extern void * memchr(const void *, int,
 #define __HAVE_ARCH_MEMSET
 extern void * memset(void *, int, __kernel_size_t);

+#ifdef CONFIG_MACH_BCM2708
+#define __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *, const void *, size_t);
+#endif
+
 extern void __memzero(void *ptr, __kernel_size_t n);

 #define memset(p,v,n) \
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -493,6 +493,9 @@ do { \
 extern unsigned long __must_check
 arm_copy_from_user(void *to, const void __user *from, unsigned long n);

+extern unsigned long __must_check
+__copy_from_user_std(void *to, const void __user *from, unsigned long n);
+
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -6,9 +6,8 @@

 lib-y    := backtrace.o changebit.o csumipv6.o csumpartial.o   \
             csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
-            delay.o delay-loop.o findbit.o memchr.o memcpy.o   \
-            memmove.o memset.o memzero.o setbit.o              \
-            strchr.o strrchr.o                                 \
+            delay.o delay-loop.o findbit.o memchr.o memzero.o  \
+            setbit.o strchr.o strrchr.o                        \
             testchangebit.o testclearbit.o testsetbit.o        \
             ashldi3.o ashrdi3.o lshrdi3.o muldi3.o             \
             ucmpdi2.o lib1funcs.o div64.o                      \
@@ -18,6 +17,16 @@ lib-y := backtrace.o changebit.o csumip
 mmu-y    := clear_user.o copy_page.o getuser.o putuser.o   \
             copy_from_user.o copy_to_user.o

+# Choose optimised implementations for Raspberry Pi
+ifeq ($(CONFIG_MACH_BCM2708),y)
+  CFLAGS_uaccess_with_memcpy.o += -DCOPY_FROM_USER_THRESHOLD=1600
+  CFLAGS_uaccess_with_memcpy.o += -DCOPY_TO_USER_THRESHOLD=672
+  obj-$(CONFIG_MODULES) += exports_rpi.o
+  lib-y += memcpy_rpi.o memmove_rpi.o memset_rpi.o memcmp_rpi.o
+else
+  lib-y += memcpy.o memmove.o memset.o
+endif
+
 # using lib_ here won't override already available weak symbols
 obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o

--- /dev/null
+++ b/arch/arm/lib/arm-mem.h
@@ -0,0 +1,159 @@
+/*
+Copyright (c) 2013, Raspberry Pi Foundation
+Copyright (c) 2013, RISC OS Open Ltd
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the copyright holder nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.macro myfunc fname
+ .func fname
+ .global fname
+fname:
+.endm
+
+.macro preload_leading_step1 backwards, ptr, base
+/* If the destination is already 16-byte aligned, then we need to preload
+ * between 0 and prefetch_distance (inclusive) cache lines ahead so there
+ * are no gaps when the inner loop starts.
+ */
+ .if backwards
+        sub     ptr, base, #1
+        bic     ptr, ptr, #31
+ .else
+        bic     ptr, base, #31
+ .endif
+ .set OFFSET, 0
+ .rept prefetch_distance+1
+        pld     [ptr, #OFFSET]
+  .if backwards
+   .set OFFSET, OFFSET-32
+  .else
+   .set OFFSET, OFFSET+32
+  .endif
+ .endr
+.endm
+
+.macro preload_leading_step2 backwards, ptr, base, leading_bytes, tmp
+/* However, if the destination is not 16-byte aligned, we may need to
+ * preload one more cache line than that. The question we need to ask is:
+ * are the leading bytes more than the amount by which the source
+ * pointer will be rounded down for preloading, and if so, by how many
+ * cache lines?
+ */
+ .if backwards
+/* Here we compare against how many bytes we are into the
+ * cache line, counting down from the highest such address.
+ * Effectively, we want to calculate
+ *     leading_bytes = dst&15
+ *     cacheline_offset = 31-((src-leading_bytes-1)&31)
+ *     extra_needed = leading_bytes - cacheline_offset
+ * and test if extra_needed is <= 0, or rearranging:
+ *     leading_bytes + (src-leading_bytes-1)&31 <= 31
+ */
+        mov     tmp, base, lsl #32-5
+        sbc     tmp, tmp, leading_bytes, lsl #32-5
+        adds    tmp, tmp, leading_bytes, lsl #32-5
+        bcc     61f
+        pld     [ptr, #-32*(prefetch_distance+1)]
+ .else
+/* Effectively, we want to calculate
+ *     leading_bytes = (-dst)&15
+ *     cacheline_offset = (src+leading_bytes)&31
+ *     extra_needed = leading_bytes - cacheline_offset
+ * and test if extra_needed is <= 0.
+ */
+        mov     tmp, base, lsl #32-5
+        add     tmp, tmp, leading_bytes, lsl #32-5
+        rsbs    tmp, tmp, leading_bytes, lsl #32-5
+        bls     61f
+        pld     [ptr, #32*(prefetch_distance+1)]
+ .endif
+61:
+.endm
+
+.macro preload_trailing backwards, base, remain, tmp
+        /* We need either 0, 1 or 2 extra preloads */
+ .if backwards
+        rsb     tmp, base, #0
+        mov     tmp, tmp, lsl #32-5
+ .else
+        mov     tmp, base, lsl #32-5
+ .endif
+        adds    tmp, tmp, remain, lsl #32-5
+        adceqs  tmp, tmp, #0
+        /* The instruction above has two effects: ensures Z is only
+         * set if C was clear (so Z indicates that both shifted quantities
+         * were 0), and clears C if Z was set (so C indicates that the sum
+         * of the shifted quantities was greater and not equal to 32) */
+        beq     82f
+ .if backwards
+        sub     tmp, base, #1
+        bic     tmp, tmp, #31
+ .else
+        bic     tmp, base, #31
+ .endif
+        bcc     81f
+ .if backwards
+        pld     [tmp, #-32*(prefetch_distance+1)]
+81:
+        pld     [tmp, #-32*prefetch_distance]
+ .else
+        pld     [tmp, #32*(prefetch_distance+2)]
+81:
+        pld     [tmp, #32*(prefetch_distance+1)]
+ .endif
+82:
+.endm
+
+.macro preload_all backwards, narrow_case, shift, base, remain, tmp0, tmp1
+ .if backwards
+        sub     tmp0, base, #1
+        bic     tmp0, tmp0, #31
+        pld     [tmp0]
+        sub     tmp1, base, remain, lsl #shift
+ .else
+        bic     tmp0, base, #31
+        pld     [tmp0]
+        add     tmp1, base, remain, lsl #shift
+        sub     tmp1, tmp1, #1
+ .endif
+        bic     tmp1, tmp1, #31
+        cmp     tmp1, tmp0
+        beq     92f
+ .if narrow_case
+        /* In this case, all the data fits in either 1 or 2 cache lines */
+        pld     [tmp1]
+ .else
+91:
+  .if backwards
+        sub     tmp0, tmp0, #32
+  .else
+        add     tmp0, tmp0, #32
+  .endif
+        cmp     tmp0, tmp1
+        pld     [tmp0]
+        bne     91b
+ .endif
+92:
+.endm
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -89,11 +89,13 @@

         .text

-ENTRY(arm_copy_from_user)
+ENTRY(__copy_from_user_std)
+WEAK(arm_copy_from_user)

 #include "copy_template.S"

 ENDPROC(arm_copy_from_user)
+ENDPROC(__copy_from_user_std)

 .pushsection .fixup,"ax"
         .align 0
--- /dev/null
+++ b/arch/arm/lib/exports_rpi.c
@@ -0,0 +1,37 @@
+/**
+ * Copyright (c) 2014, Raspberry Pi (Trading) Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+EXPORT_SYMBOL(memcmp);
--- /dev/null
+++ b/arch/arm/lib/memcmp_rpi.S
@@ -0,0 +1,285 @@
+/*
+Copyright (c) 2013, Raspberry Pi Foundation
+Copyright (c) 2013, RISC OS Open Ltd
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the copyright holder nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/linkage.h>
+#include "arm-mem.h"
+
+/* Prevent the stack from becoming executable */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+        .text
+        .arch armv6
+        .object_arch armv4
+        .arm
+        .altmacro
+        .p2align 2
+
+.macro memcmp_process_head unaligned
+ .if unaligned
+        ldr     DAT0, [S_1], #4
+        ldr     DAT1, [S_1], #4
+        ldr     DAT2, [S_1], #4
+        ldr     DAT3, [S_1], #4
+ .else
+        ldmia   S_1!, {DAT0, DAT1, DAT2, DAT3}
+ .endif
+        ldmia   S_2!, {DAT4, DAT5, DAT6, DAT7}
+.endm
+
+.macro memcmp_process_tail
+        cmp     DAT0, DAT4
+        cmpeq   DAT1, DAT5
+        cmpeq   DAT2, DAT6
+        cmpeq   DAT3, DAT7
+        bne     200f
+.endm
+
+.macro memcmp_leading_31bytes
+        movs    DAT0, OFF, lsl #31
+        ldrmib  DAT0, [S_1], #1
+        ldrcsh  DAT1, [S_1], #2
+        ldrmib  DAT4, [S_2], #1
+        ldrcsh  DAT5, [S_2], #2
+        movpl   DAT0, #0
+        movcc   DAT1, #0
+        movpl   DAT4, #0
+        movcc   DAT5, #0
+        submi   N, N, #1
+        subcs   N, N, #2
+        cmp     DAT0, DAT4
+        cmpeq   DAT1, DAT5
+        bne     200f
+        movs    DAT0, OFF, lsl #29
+        ldrmi   DAT0, [S_1], #4
+        ldrcs   DAT1, [S_1], #4
+        ldrcs   DAT2, [S_1], #4
+        ldrmi   DAT4, [S_2], #4
+        ldmcsia S_2!, {DAT5, DAT6}
+        movpl   DAT0, #0
+        movcc   DAT1, #0
+        movcc   DAT2, #0
+        movpl   DAT4, #0
+        movcc   DAT5, #0
+        movcc   DAT6, #0
+        submi   N, N, #4
+        subcs   N, N, #8
+        cmp     DAT0, DAT4
+        cmpeq   DAT1, DAT5
+        cmpeq   DAT2, DAT6
+        bne     200f
+        tst     OFF, #16
+        beq     105f
+        memcmp_process_head 1
+        sub     N, N, #16
+        memcmp_process_tail
+105:
+.endm
+
+.macro memcmp_trailing_15bytes unaligned
+        movs    N, N, lsl #29
+ .if unaligned
+        ldrcs   DAT0, [S_1], #4
+        ldrcs   DAT1, [S_1], #4
+ .else
+        ldmcsia S_1!, {DAT0, DAT1}
+ .endif
+        ldrmi   DAT2, [S_1], #4
+        ldmcsia S_2!, {DAT4, DAT5}
+        ldrmi   DAT6, [S_2], #4
+        movcc   DAT0, #0
+        movcc   DAT1, #0
+        movpl   DAT2, #0
+        movcc   DAT4, #0
+        movcc   DAT5, #0
+        movpl   DAT6, #0
+        cmp     DAT0, DAT4
+        cmpeq   DAT1, DAT5
+        cmpeq   DAT2, DAT6
+        bne     200f
+        movs    N, N, lsl #2
+        ldrcsh  DAT0, [S_1], #2
+        ldrmib  DAT1, [S_1]
+        ldrcsh  DAT4, [S_2], #2
+        ldrmib  DAT5, [S_2]
+        movcc   DAT0, #0
+        movpl   DAT1, #0
+        movcc   DAT4, #0
+        movpl   DAT5, #0
+        cmp     DAT0, DAT4
+        cmpeq   DAT1, DAT5
+        bne     200f
+.endm
+
+.macro memcmp_long_inner_loop unaligned
+110:
+        memcmp_process_head unaligned
+        pld     [S_2, #prefetch_distance*32 + 16]
+        memcmp_process_tail
+        memcmp_process_head unaligned
+        pld     [S_1, OFF]
+        memcmp_process_tail
+        subs    N, N, #32
+        bhs     110b
+        /* Just before the final (prefetch_distance+1) 32-byte blocks,
+         * deal with final preloads */
+        preload_trailing 0, S_1, N, DAT0
+        preload_trailing 0, S_2, N, DAT0
+        add     N, N, #(prefetch_distance+2)*32 - 16
+120:
+        memcmp_process_head unaligned
+        memcmp_process_tail
+        subs    N, N, #16
+        bhs     120b
+        /* Trailing words and bytes */
+        tst     N, #15
+        beq     199f
+        memcmp_trailing_15bytes unaligned
+199:    /* Reached end without detecting a difference */
+        mov     a1, #0
+        setend  le
+        pop     {DAT1-DAT6, pc}
+.endm
+
+.macro memcmp_short_inner_loop unaligned
+        subs    N, N, #16 /* simplifies inner loop termination */
+        blo     122f
+120:
+        memcmp_process_head unaligned
+        memcmp_process_tail
+        subs    N, N, #16
+        bhs     120b
+122:    /* Trailing words and bytes */
+        tst     N, #15
+        beq     199f
+        memcmp_trailing_15bytes unaligned
+199:    /* Reached end without detecting a difference */
+        mov     a1, #0
+        setend  le
+        pop     {DAT1-DAT6, pc}
+.endm
+
+/*
+ * int memcmp(const void *s1, const void *s2, size_t n);
+ * On entry:
+ * a1 = pointer to buffer 1
+ * a2 = pointer to buffer 2
+ * a3 = number of bytes to compare (as unsigned chars)
+ * On exit:
+ * a1 = >0/=0/<0 if s1 >/=/< s2
+ */

+.set prefetch_distance, 2
+
+ENTRY(memcmp)
+        S_1     .req    a1
+        S_2     .req    a2
+        N       .req    a3
+        DAT0    .req    a4
+        DAT1    .req    v1
+        DAT2    .req    v2
+        DAT3    .req    v3
+        DAT4    .req    v4
+        DAT5    .req    v5
+        DAT6    .req    v6
+        DAT7    .req    ip
+        OFF     .req    lr
+
+        push    {DAT1-DAT6, lr}
+        setend  be /* lowest-addressed bytes are most significant */
+
+        /* To preload ahead as we go, we need at least (prefetch_distance+2) 32-byte blocks */
+        cmp     N, #(prefetch_distance+3)*32 - 1
+        blo     170f
+
+        /* Long case */
+        /* Adjust N so that the decrement instruction can also test for
+         * inner loop termination. We want it to stop when there are
+         * (prefetch_distance+1) complete blocks to go. */
+        sub     N, N, #(prefetch_distance+2)*32
+        preload_leading_step1 0, DAT0, S_1
+        preload_leading_step1 0, DAT1, S_2
+        tst     S_2, #31
+        beq     154f
+        rsb     OFF, S_2, #0 /* no need to AND with 15 here */
+        preload_leading_step2 0, DAT0, S_1, OFF, DAT2
+        preload_leading_step2 0, DAT1, S_2, OFF, DAT2
+        memcmp_leading_31bytes
+154:    /* Second source now cacheline (32-byte) aligned; we have at
+         * least one prefetch to go. */
+        /* Prefetch offset is best selected such that it lies in the
+         * first 8 of each 32 bytes - but it's just as easy to aim for
+         * the first one */
+        and     OFF, S_1, #31
+        rsb     OFF, OFF, #32*prefetch_distance
+        tst     S_1, #3
+        bne     140f
+        memcmp_long_inner_loop 0
+140:    memcmp_long_inner_loop 1
+
+170:    /* Short case */
+        teq     N, #0
+        beq     199f
+        preload_all 0, 0, 0, S_1, N, DAT0, DAT1
+        preload_all 0, 0, 0, S_2, N, DAT0, DAT1
+        tst     S_2, #3
+        beq     174f
+172:    subs    N, N, #1
+        blo     199f
+        ldrb    DAT0, [S_1], #1
+        ldrb    DAT4, [S_2], #1
+        cmp     DAT0, DAT4
+        bne     200f
+        tst     S_2, #3
+        bne     172b
+174:    /* Second source now 4-byte aligned; we have 0 or more bytes to go */
+        tst     S_1, #3
+        bne     140f
+        memcmp_short_inner_loop 0
+140:    memcmp_short_inner_loop 1
+
+200:    /* Difference found: determine sign. */
+        movhi   a1, #1
+        movlo   a1, #-1
+        setend  le
+        pop     {DAT1-DAT6, pc}
+
+ .unreq S_1
+ .unreq S_2
+ .unreq N
+ .unreq DAT0
+ .unreq DAT1
+ .unreq DAT2
+ .unreq DAT3
+ .unreq DAT4
+ .unreq DAT5
+ .unreq DAT6
+ .unreq DAT7
+ .unreq OFF
+ENDPROC(memcmp)
--- /dev/null
+++ b/arch/arm/lib/memcpy_rpi.S
@@ -0,0 +1,61 @@
+/*
+Copyright (c) 2013, Raspberry Pi Foundation
+Copyright (c) 2013, RISC OS Open Ltd
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the copyright holder nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/linkage.h>
+#include "arm-mem.h"
+#include "memcpymove.h"
+
+/* Prevent the stack from becoming executable */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+        .text
+        .arch armv6
+        .object_arch armv4
+        .arm
+        .altmacro
+        .p2align 2
+
+/*
+ * void *memcpy(void * restrict s1, const void * restrict s2, size_t n);
+ * On entry:
+ * a1 = pointer to destination
+ * a2 = pointer to source
+ * a3 = number of bytes to copy
+ * On exit:
+ * a1 preserved
+ */
+
+.set prefetch_distance, 3
+
+ENTRY(mmiocpy)
+ENTRY(memcpy)
+        memcpy  0
+ENDPROC(memcpy)
+ENDPROC(mmiocpy)
--- /dev/null
+++ b/arch/arm/lib/memcpymove.h
@@ -0,0 +1,506 @@
+/*
+Copyright (c) 2013, Raspberry Pi Foundation
+Copyright (c) 2013, RISC OS Open Ltd
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the copyright holder nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+.macro unaligned_words backwards, align, use_pld, words, r0, r1, r2, r3, r4, r5, r6, r7, r8
+ .if words == 1
+  .if backwards
+        mov     r1, r0, lsl #32-align*8
+        ldr     r0, [S, #-4]!
+        orr     r1, r1, r0, lsr #align*8
+        str     r1, [D, #-4]!
+  .else
+        mov     r0, r1, lsr #align*8
+        ldr     r1, [S, #4]!
+        orr     r0, r0, r1, lsl #32-align*8
+        str     r0, [D], #4
+  .endif
+ .elseif words == 2
+  .if backwards
+        ldr     r1, [S, #-4]!
+        mov     r2, r0, lsl #32-align*8
+        ldr     r0, [S, #-4]!
+        orr     r2, r2, r1, lsr #align*8
+        mov     r1, r1, lsl #32-align*8
+        orr     r1, r1, r0, lsr #align*8
+        stmdb   D!, {r1, r2}
+  .else
+        ldr     r1, [S, #4]!
+        mov     r0, r2, lsr #align*8
+        ldr     r2, [S, #4]!
+        orr     r0, r0, r1, lsl #32-align*8
+        mov     r1, r1, lsr #align*8
+        orr     r1, r1, r2, lsl #32-align*8
+        stmia   D!, {r0, r1}
+  .endif
+ .elseif words == 4
+  .if backwards
+        ldmdb   S!, {r2, r3}
+        mov     r4, r0, lsl #32-align*8
+        ldmdb   S!, {r0, r1}
+        orr     r4, r4, r3, lsr #align*8
+        mov     r3, r3, lsl #32-align*8
+        orr     r3, r3, r2, lsr #align*8
+        mov     r2, r2, lsl #32-align*8
+        orr     r2, r2, r1, lsr #align*8
+        mov     r1, r1, lsl #32-align*8
+        orr     r1, r1, r0, lsr #align*8
+        stmdb   D!, {r1, r2, r3, r4}
+  .else
+        ldmib   S!, {r1, r2}
+        mov     r0, r4, lsr #align*8
+        ldmib   S!, {r3, r4}
+        orr     r0, r0, r1, lsl #32-align*8
+        mov     r1, r1, lsr #align*8
+        orr     r1, r1, r2, lsl #32-align*8
+        mov     r2, r2, lsr #align*8
+        orr     r2, r2, r3, lsl #32-align*8
+        mov     r3, r3, lsr #align*8
+        orr     r3, r3, r4, lsl #32-align*8
+        stmia   D!, {r0, r1, r2, r3}
+  .endif
+ .elseif words == 8
+  .if backwards
+        ldmdb   S!, {r4, r5, r6, r7}
+        mov     r8, r0, lsl #32-align*8
+        ldmdb   S!, {r0, r1, r2, r3}
+   .if use_pld
+        pld     [S, OFF]
+   .endif
+        orr     r8, r8, r7, lsr #align*8
+        mov     r7, r7, lsl #32-align*8
+        orr     r7, r7, r6, lsr #align*8
+        mov     r6, r6, lsl #32-align*8
+        orr     r6, r6, r5, lsr #align*8
+        mov     r5, r5, lsl #32-align*8
+        orr     r5, r5, r4, lsr #align*8
+        mov     r4, r4, lsl #32-align*8
+        orr     r4, r4, r3, lsr #align*8
+        mov     r3, r3, lsl #32-align*8
+        orr     r3, r3, r2, lsr #align*8
+        mov     r2, r2, lsl #32-align*8
+        orr     r2, r2, r1, lsr #align*8
+        mov     r1, r1, lsl #32-align*8
+        orr     r1, r1, r0, lsr #align*8
+        stmdb   D!, {r5, r6, r7, r8}
+        stmdb   D!, {r1, r2, r3, r4}
+  .else
+        ldmib   S!, {r1, r2, r3, r4}
+        mov     r0, r8, lsr #align*8
+        ldmib   S!, {r5, r6, r7, r8}
+   .if use_pld
+        pld     [S, OFF]
+   .endif
+        orr     r0, r0, r1, lsl #32-align*8
+        mov     r1, r1, lsr #align*8
+        orr     r1, r1, r2, lsl #32-align*8
+        mov     r2, r2, lsr #align*8
+        orr     r2, r2, r3, lsl #32-align*8
+        mov     r3, r3, lsr #align*8
+        orr     r3, r3, r4, lsl #32-align*8
+        mov     r4, r4, lsr #align*8
+        orr     r4, r4, r5, lsl #32-align*8
+        mov     r5, r5, lsr #align*8
+        orr     r5, r5, r6, lsl #32-align*8
+        mov     r6, r6, lsr #align*8
+        orr     r6, r6, r7, lsl #32-align*8
+        mov     r7, r7, lsr #align*8
+        orr     r7, r7, r8, lsl #32-align*8
+        stmia   D!, {r0, r1, r2, r3}
+        stmia   D!, {r4, r5, r6, r7}
+  .endif
+ .endif
+.endm
+
+.macro memcpy_leading_15bytes backwards, align
+        movs    DAT1, DAT2, lsl #31
+        sub     N, N, DAT2
+ .if backwards
+        ldrmib  DAT0, [S, #-1]!
+        ldrcsh  DAT1, [S, #-2]!
+        strmib  DAT0, [D, #-1]!
+        strcsh  DAT1, [D, #-2]!
+ .else
+        ldrmib  DAT0, [S], #1
+        ldrcsh  DAT1, [S], #2
+        strmib  DAT0, [D], #1
+        strcsh  DAT1, [D], #2
+ .endif
+        movs    DAT1, DAT2, lsl #29
+ .if backwards
+        ldrmi   DAT0, [S, #-4]!
+  .if align == 0
+        ldmcsdb S!, {DAT1, DAT2}
+  .else
+        ldrcs   DAT2, [S, #-4]!
+        ldrcs   DAT1, [S, #-4]!
+  .endif
+        strmi   DAT0, [D, #-4]!
+        stmcsdb D!, {DAT1, DAT2}
+ .else
+        ldrmi   DAT0, [S], #4
+  .if align == 0
+        ldmcsia S!, {DAT1, DAT2}
+  .else
+        ldrcs   DAT1, [S], #4
+        ldrcs   DAT2, [S], #4
+  .endif
+        strmi   DAT0, [D], #4
+        stmcsia D!, {DAT1, DAT2}
+ .endif
+.endm
+
+.macro memcpy_trailing_15bytes backwards, align
+        movs    N, N, lsl #29
+ .if backwards
+  .if align == 0
+        ldmcsdb S!, {DAT0, DAT1}
+  .else
+        ldrcs   DAT1, [S, #-4]!
+        ldrcs   DAT0, [S, #-4]!
+  .endif
+        ldrmi   DAT2, [S, #-4]!
+        stmcsdb D!, {DAT0, DAT1}
+        strmi   DAT2, [D, #-4]!
+ .else
+  .if align == 0
+        ldmcsia S!, {DAT0, DAT1}
+  .else
+        ldrcs   DAT0, [S], #4
+        ldrcs   DAT1, [S], #4
+  .endif
+        ldrmi   DAT2, [S], #4
+        stmcsia D!, {DAT0, DAT1}
+        strmi   DAT2, [D], #4
+ .endif
+        movs    N, N, lsl #2
+ .if backwards
+        ldrcsh  DAT0, [S, #-2]!
+        ldrmib  DAT1, [S, #-1]
+        strcsh  DAT0, [D, #-2]!
+        strmib  DAT1, [D, #-1]
+ .else
+        ldrcsh  DAT0, [S], #2
+        ldrmib  DAT1, [S]
+        strcsh  DAT0, [D], #2
+        strmib  DAT1, [D]
+ .endif
+.endm
+
+.macro memcpy_long_inner_loop backwards, align
+ .if align != 0
+  .if backwards
+        ldr     DAT0, [S, #-align]!
+  .else
+        ldr     LAST, [S, #-align]!
+  .endif
+ .endif
+110:
+ .if align == 0
+  .if backwards
+        ldmdb   S!, {DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, LAST}
+        pld     [S, OFF]
+        stmdb   D!, {DAT4, DAT5, DAT6, LAST}
+        stmdb   D!, {DAT0, DAT1, DAT2, DAT3}
+  .else
+        ldmia   S!, {DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, LAST}
+        pld     [S, OFF]
+        stmia   D!, {DAT0, DAT1, DAT2, DAT3}
+        stmia   D!, {DAT4, DAT5, DAT6, LAST}
+  .endif
+ .else
+        unaligned_words backwards, align, 1, 8, DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, DAT7, LAST
+ .endif
+        subs    N, N, #32
+        bhs     110b
+        /* Just before the final (prefetch_distance+1) 32-byte blocks, deal with final preloads */
+        preload_trailing backwards, S, N, OFF
+        add     N, N, #(prefetch_distance+2)*32 - 32
+120:
+ .if align == 0
+  .if backwards
+        ldmdb   S!, {DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, LAST}
+        stmdb   D!, {DAT4, DAT5, DAT6, LAST}
+        stmdb   D!, {DAT0, DAT1, DAT2, DAT3}
+  .else
+        ldmia   S!, {DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, LAST}
+        stmia   D!, {DAT0, DAT1, DAT2, DAT3}
+        stmia   D!, {DAT4, DAT5, DAT6, LAST}
+  .endif
+ .else
+        unaligned_words backwards, align, 0, 8, DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, DAT7, LAST
+ .endif
+        subs    N, N, #32
+        bhs     120b
+        tst     N, #16
+ .if align == 0
+  .if backwards
+        ldmnedb S!, {DAT0, DAT1, DAT2, LAST}
+        stmnedb D!, {DAT0, DAT1, DAT2, LAST}
+  .else
+        ldmneia S!, {DAT0, DAT1, DAT2, LAST}
+        stmneia D!, {DAT0, DAT1, DAT2, LAST}
+  .endif
+ .else
+        beq     130f
+        unaligned_words backwards, align, 0, 4, DAT0, DAT1, DAT2, DAT3, LAST
+130:
+ .endif
+        /* Trailing words and bytes */
+        tst     N, #15
+        beq     199f
+ .if align != 0
+        add     S, S, #align
+ .endif
+        memcpy_trailing_15bytes backwards, align
+199:
+        pop     {DAT3, DAT4, DAT5, DAT6, DAT7}
+        pop     {D, DAT1, DAT2, pc}
+.endm
+
+.macro memcpy_medium_inner_loop backwards, align
+120:
+ .if backwards
+  .if align == 0
+        ldmdb   S!, {DAT0, DAT1, DAT2, LAST}
+  .else
+        ldr     LAST, [S, #-4]!
+        ldr     DAT2, [S, #-4]!
+        ldr     DAT1, [S, #-4]!
+        ldr     DAT0, [S, #-4]!
+  .endif
+        stmdb   D!, {DAT0, DAT1, DAT2, LAST}
+ .else
+  .if align == 0
+        ldmia   S!, {DAT0, DAT1, DAT2, LAST}
+  .else
+        ldr     DAT0, [S], #4
+        ldr     DAT1, [S], #4
+        ldr     DAT2, [S], #4
+        ldr     LAST, [S], #4
+  .endif
+        stmia   D!, {DAT0, DAT1, DAT2, LAST}
+ .endif
+        subs    N, N, #16
+        bhs     120b
+        /* Trailing words and bytes */
+        tst     N, #15
+        beq     199f
+        memcpy_trailing_15bytes backwards, align
+199:
+        pop     {D, DAT1, DAT2, pc}
+.endm
+
+.macro memcpy_short_inner_loop backwards, align
+        tst     N, #16
+ .if backwards
+  .if align == 0
+        ldmnedb S!, {DAT0, DAT1, DAT2, LAST}
+  .else
+        ldrne   LAST, [S, #-4]!
+        ldrne   DAT2, [S, #-4]!
+        ldrne   DAT1, [S, #-4]!
+        ldrne   DAT0, [S, #-4]!
+  .endif
+        stmnedb D!, {DAT0, DAT1, DAT2, LAST}
+ .else
+  .if align == 0
+        ldmneia S!, {DAT0, DAT1, DAT2, LAST}
+  .else
+        ldrne   DAT0, [S], #4
+        ldrne   DAT1, [S], #4
+        ldrne   DAT2, [S], #4
+        ldrne   LAST, [S], #4
+  .endif
+        stmneia D!, {DAT0, DAT1, DAT2, LAST}
+ .endif
+        memcpy_trailing_15bytes backwards, align
+199:
+        pop     {D, DAT1, DAT2, pc}
+.endm
+
+.macro memcpy backwards
+        D       .req    a1
+        S       .req    a2
+        N       .req    a3
+        DAT0    .req    a4
+        DAT1    .req    v1
+        DAT2    .req    v2
+        DAT3    .req    v3
+        DAT4    .req    v4
+        DAT5    .req    v5
+        DAT6    .req    v6
+        DAT7    .req    sl
+        LAST    .req    ip
+        OFF     .req    lr
+
+        .cfi_startproc
+
+        push    {D, DAT1, DAT2, lr}
+
+        .cfi_def_cfa_offset 16
+        .cfi_rel_offset D, 0
+        .cfi_undefined  S
+        .cfi_undefined  N
+        .cfi_undefined  DAT0
+        .cfi_rel_offset DAT1, 4
+        .cfi_rel_offset DAT2, 8
+        .cfi_undefined  LAST
+        .cfi_rel_offset lr, 12
+
+ .if backwards
+        add     D, D, N
+        add     S, S, N
+ .endif
+
+        /* See if we're guaranteed to have at least one 16-byte aligned 16-byte write */
+        cmp     N, #31
+        blo     170f
+        /* To preload ahead as we go, we need at least (prefetch_distance+2) 32-byte blocks */
+        cmp     N, #(prefetch_distance+3)*32 - 1
+        blo     160f
+
+        /* Long case */
+        push    {DAT3, DAT4, DAT5, DAT6, DAT7}
+
+        .cfi_def_cfa_offset 36
+        .cfi_rel_offset D, 20
+        .cfi_rel_offset DAT1, 24
+        .cfi_rel_offset DAT2, 28
+        .cfi_rel_offset DAT3, 0
+        .cfi_rel_offset DAT4, 4
+        .cfi_rel_offset DAT5, 8
+        .cfi_rel_offset DAT6, 12
+        .cfi_rel_offset DAT7, 16
+        .cfi_rel_offset lr, 32
+
+        /* Adjust N so that the decrement instruction can also test for
+         * inner loop termination. We want it to stop when there are
+         * (prefetch_distance+1) complete blocks to go. */
+        sub     N, N, #(prefetch_distance+2)*32
+        preload_leading_step1 backwards, DAT0, S
+ .if backwards
+        /* Bug in GAS: it accepts, but mis-assembles the instruction
+         * ands DAT2, D, #60, 2
+         * which sets DAT2 to the number of leading bytes until destination is aligned and also clears C (sets borrow)
+         */
+        .word   0xE210513C
+        beq     154f
+ .else
+        ands    DAT2, D, #15
+        beq     154f
+        rsb     DAT2, DAT2, #16 /* number of leading bytes until destination aligned */
+ .endif
+        preload_leading_step2 backwards, DAT0, S, DAT2, OFF
+        memcpy_leading_15bytes backwards, 1
+154:    /* Destination now 16-byte aligned; we have at least one prefetch as well as at least one 16-byte output block */
+        /* Prefetch offset is best selected such that it lies in the first 8 of each 32 bytes - but it's just as easy to aim for the first one */
+ .if backwards
+        rsb     OFF, S, #3
+        and     OFF, OFF, #28
+        sub     OFF, OFF, #32*(prefetch_distance+1)
+ .else
+        and     OFF, S, #28
+        rsb     OFF, OFF, #32*prefetch_distance
+ .endif
+        movs    DAT0, S, lsl #31
+        bhi     157f
+        bcs     156f
+        bmi     155f
+        memcpy_long_inner_loop backwards, 0
+155:    memcpy_long_inner_loop backwards, 1
+156:    memcpy_long_inner_loop backwards, 2
+157:    memcpy_long_inner_loop backwards, 3
+
+        .cfi_def_cfa_offset 16
+        .cfi_rel_offset D, 0
+        .cfi_rel_offset DAT1, 4
+        .cfi_rel_offset DAT2, 8
+        .cfi_same_value DAT3
+        .cfi_same_value DAT4
+        .cfi_same_value DAT5
+        .cfi_same_value DAT6
+        .cfi_same_value DAT7
+        .cfi_rel_offset lr, 12
+
+160:    /* Medium case */
+        preload_all backwards, 0, 0, S, N, DAT2, OFF
+        sub     N, N, #16 /* simplifies inner loop termination */
+ .if backwards
+        ands    DAT2, D, #15
+        beq     164f
+ .else
+        ands    DAT2, D, #15
+        beq     164f
+        rsb     DAT2, DAT2, #16
+ .endif
+        memcpy_leading_15bytes backwards, align
+164:    /* Destination now 16-byte aligned; we have at least one 16-byte output block */
+        tst     S, #3
+        bne     140f
+        memcpy_medium_inner_loop backwards, 0
+140:    memcpy_medium_inner_loop backwards, 1
+
+170:    /* Short case, less than 31 bytes, so no guarantee of at least one 16-byte block */
+        teq     N, #0
+        beq     199f
+        preload_all backwards, 1, 0, S, N, DAT2, LAST
+        tst     D, #3
+        beq     174f
+172:    subs    N, N, #1
+        blo     199f
+ .if backwards
+        ldrb    DAT0, [S, #-1]!
+        strb    DAT0, [D, #-1]!
+ .else
+        ldrb    DAT0, [S], #1
+        strb    DAT0, [D], #1
+ .endif
+        tst     D, #3
+        bne     172b
+174:    /* Destination now 4-byte aligned; we have 0 or more output bytes to go */
+        tst     S, #3
+        bne     140f
+        memcpy_short_inner_loop backwards, 0
+140:    memcpy_short_inner_loop backwards, 1
+
+        .cfi_endproc
+
+ .unreq D
+ .unreq S
+ .unreq N
+ .unreq DAT0
+ .unreq DAT1
+ .unreq DAT2
+ .unreq DAT3
+ .unreq DAT4
+ .unreq DAT5
+ .unreq DAT6
+ .unreq DAT7
+ .unreq LAST
+ .unreq OFF
+.endm
--- /dev/null
+++ b/arch/arm/lib/memmove_rpi.S
@@ -0,0 +1,61 @@
+/*
+Copyright (c) 2013, Raspberry Pi Foundation
+Copyright (c) 2013, RISC OS Open Ltd
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the copyright holder nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/linkage.h>
+#include "arm-mem.h"
+#include "memcpymove.h"
+
+/* Prevent the stack from becoming executable */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+        .text
+        .arch armv6
+        .object_arch armv4
+        .arm
+        .altmacro
+        .p2align 2
+
+/*
+ * void *memmove(void *s1, const void *s2, size_t n);
+ * On entry:
+ * a1 = pointer to destination
+ * a2 = pointer to source
+ * a3 = number of bytes to copy
+ * On exit:
+ * a1 preserved
+ */
+
+.set prefetch_distance, 3
+
+ENTRY(memmove)
+        cmp     a2, a1
+        bpl     memcpy /* pl works even over -1 - 0 and 0x7fffffff - 0x80000000 boundaries */
+        memcpy  1
+ENDPROC(memmove)
--- /dev/null
+++ b/arch/arm/lib/memset_rpi.S
@@ -0,0 +1,123 @@
+/*
+Copyright (c) 2013, Raspberry Pi Foundation
+Copyright (c) 2013, RISC OS Open Ltd
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the copyright holder nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/linkage.h>
+#include "arm-mem.h"
+
+/* Prevent the stack from becoming executable */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+        .text
+        .arch armv6
+        .object_arch armv4
+        .arm
+        .altmacro
+        .p2align 2
+
+/*
+ * void *memset(void *s, int c, size_t n);
+ * On entry:
+ * a1 = pointer to buffer to fill
+ * a2 = byte pattern to fill with (caller-narrowed)
+ * a3 = number of bytes to fill
+ * On exit:
+ * a1 preserved
+ */
+ENTRY(mmioset)
+ENTRY(memset)
+        S       .req    a1
+        DAT0    .req    a2
+        N       .req    a3
+        DAT1    .req    a4
+        DAT2    .req    ip
+        DAT3    .req    lr
+
+        orr     DAT0, DAT0, lsl #8
+        push    {S, lr}
+        orr     DAT0, DAT0, lsl #16
+        mov     DAT1, DAT0
+
+        /* See if we're guaranteed to have at least one 16-byte aligned 16-byte write */
+        cmp     N, #31
+        blo     170f
+
+161:    sub     N, N, #16 /* simplifies inner loop termination */
+        /* Leading words and bytes */
+        tst     S, #15
+        beq     164f
+        rsb     DAT3, S, #0 /* bits 0-3 = number of leading bytes until aligned */
+        movs    DAT2, DAT3, lsl #31
+        submi   N, N, #1
+        strmib  DAT0, [S], #1
+        subcs   N, N, #2
+        strcsh  DAT0, [S], #2
+        movs    DAT2, DAT3, lsl #29
+        submi   N, N, #4
+        strmi   DAT0, [S], #4
+        subcs   N, N, #8
+        stmcsia S!, {DAT0, DAT1}
+164:    /* Delayed set up of DAT2 and DAT3 so we could use them as scratch registers above */
+        mov     DAT2, DAT0
+        mov     DAT3, DAT0
+        /* Now the inner loop of 16-byte stores */
+165:    stmia   S!, {DAT0, DAT1, DAT2, DAT3}
+        subs    N, N, #16
+        bhs     165b
+166:    /* Trailing words and bytes */
+        movs    N, N, lsl #29
+        stmcsia S!, {DAT0, DAT1}
+        strmi   DAT0, [S], #4
+        movs    N, N, lsl #2
+        strcsh  DAT0, [S], #2
+        strmib  DAT0, [S]
+199:    pop     {S, pc}
+
+170:    /* Short case */
+        mov     DAT2, DAT0
+        mov     DAT3, DAT0
+        tst     S, #3
+        beq     174f
+172:    subs    N, N, #1
+        blo     199b
+        strb    DAT0, [S], #1
+        tst     S, #3
+        bne     172b
+174:    tst     N, #16
+        stmneia S!, {DAT0, DAT1, DAT2, DAT3}
+        b       166b
+
+ .unreq S
+ .unreq DAT0
+ .unreq N
+ .unreq DAT1
+ .unreq DAT2
+ .unreq DAT3
+ENDPROC(memset)
+ENDPROC(mmioset)
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -22,6 +22,14 @@
 #include <asm/current.h>
 #include <asm/page.h>

+#ifndef COPY_FROM_USER_THRESHOLD
+#define COPY_FROM_USER_THRESHOLD 64
+#endif
+
+#ifndef COPY_TO_USER_THRESHOLD
+#define COPY_TO_USER_THRESHOLD 64
+#endif
+
 static int
 pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 {
@@ -85,7 +93,44 @@ pin_page_for_write(const void __user *_a
         return 1;
 }

-static unsigned long noinline
+static int
+pin_page_for_read(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
+{
+        unsigned long addr = (unsigned long)_addr;
+        pgd_t *pgd;
+        pmd_t *pmd;
+        pte_t *pte;
+        pud_t *pud;
+        spinlock_t *ptl;
+
+        pgd = pgd_offset(current->mm, addr);
+        if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
+        {
+                return 0;
+        }
+        pud = pud_offset(pgd, addr);
+        if (unlikely(pud_none(*pud) || pud_bad(*pud)))
+        {
+                return 0;
+        }
+
+        pmd = pmd_offset(pud, addr);
+        if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+                return 0;
+
+        pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
+        if (unlikely(!pte_present(*pte) || !pte_young(*pte))) {
+                pte_unmap_unlock(pte, ptl);
+                return 0;
+        }
+
+        *ptep = pte;
+        *ptlp = ptl;
+
+        return 1;
+}
+
+unsigned long noinline
 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 {
         unsigned long ua_flags;
@@ -138,6 +183,54 @@ out:
         return n;
 }

+unsigned long noinline
+__copy_from_user_memcpy(void *to, const void __user *from, unsigned long n)
+{
+        int atomic;
+
+        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+                memcpy(to, (const void *)from, n);
+                return 0;
+        }
+
+        /* the mmap semaphore is taken only if not in an atomic context */
+        atomic = in_atomic();
+
+        if (!atomic)
+                down_read(&current->mm->mmap_sem);
+        while (n) {
+                pte_t *pte;
+                spinlock_t *ptl;
+                int tocopy;
+
+                while (!pin_page_for_read(from, &pte, &ptl)) {
+                        char temp;
+                        if (!atomic)
+                                up_read(&current->mm->mmap_sem);
+                        if (__get_user(temp, (char __user *)from))
+                                goto out;
+                        if (!atomic)
+                                down_read(&current->mm->mmap_sem);
+                }
+
+                tocopy = (~(unsigned long)from & ~PAGE_MASK) + 1;
+                if (tocopy > n)
+                        tocopy = n;
+
+                memcpy(to, (const void *)from, tocopy);
+                to += tocopy;
+                from += tocopy;
+                n -= tocopy;
+
+                pte_unmap_unlock(pte, ptl);
+        }
+        if (!atomic)
+                up_read(&current->mm->mmap_sem);
+
+out:
+        return n;
+}
+
 unsigned long
 arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
@@ -148,7 +241,7 @@ arm_copy_to_user(void __user *to, const
          * With frame pointer disabled, tail call optimization kicks in
          * as well making this test almost invisible.
          */
-        if (n < 64) {
+        if (n < COPY_TO_USER_THRESHOLD) {
                 unsigned long ua_flags = uaccess_save_and_enable();
                 n = __copy_to_user_std(to, from, n);
                 uaccess_restore(ua_flags);
@@ -157,6 +250,21 @@ arm_copy_to_user(void __user *to, const
         }
         return n;
 }
+
+unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+        /*
+         * This test is stubbed out of the main function above to keep
+         * the overhead for small copies low by avoiding a large
+         * register dump on the stack just to reload them right away.
+         * With frame pointer disabled, tail call optimization kicks in
+         * as well making this test almost invisible.
+         */
+        if (n < COPY_FROM_USER_THRESHOLD)
+                return __copy_from_user_std(to, from, n);
+        return __copy_from_user_memcpy(to, from, n);
+}

 static unsigned long noinline
 __clear_user_memset(void __user *addr, unsigned long n)