1 From 4514b02d95de8be6d6298ee20a5204815cf669d0 Mon Sep 17 00:00:00 2001
2 From: Phil Elwell <phil@raspberrypi.org>
3 Date: Mon, 13 Oct 2014 11:47:53 +0100
4 Subject: [PATCH 079/232] Improve __copy_to_user and __copy_from_user
7 Provide a __copy_from_user that uses memcpy. On BCM2708, use
8 optimised memcpy/memmove/memcmp/memset implementations.
10 arch/arm: Add mmiocpy/set aliases for memcpy/set
12 See: https://github.com/raspberrypi/linux/issues/1082
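In outline, arm_copy_from_user() becomes a size-based dispatcher. A sketch,
condensed from the uaccess_with_memcpy.c hunk below (the 1600/672 byte
thresholds are set in the Makefile for BCM2708 only; the defaults are 64):

    unsigned long arm_copy_from_user(void *to, const void __user *from,
                                     unsigned long n)
    {
            if (n < COPY_FROM_USER_THRESHOLD)       /* 1600 on BCM2708 */
                    return __copy_from_user_std(to, from, n);
            return __copy_from_user_memcpy(to, from, n);
    }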
14 arch/arm/include/asm/string.h | 5 +
15 arch/arm/include/asm/uaccess.h | 3 +
16 arch/arm/lib/Makefile | 15 +-
17 arch/arm/lib/arm-mem.h | 159 ++++++++++++
18 arch/arm/lib/copy_from_user.S | 4 +-
19 arch/arm/lib/exports_rpi.c | 37 +++
20 arch/arm/lib/memcmp_rpi.S | 285 +++++++++++++++++++++
21 arch/arm/lib/memcpy_rpi.S | 61 +++++
22 arch/arm/lib/memcpymove.h | 506 +++++++++++++++++++++++++++++++++++++
23 arch/arm/lib/memmove_rpi.S | 61 +++++
24 arch/arm/lib/memset_rpi.S | 123 +++++++++
25 arch/arm/lib/uaccess_with_memcpy.c | 112 +++++++-
26 12 files changed, 1365 insertions(+), 6 deletions(-)
27 create mode 100644 arch/arm/lib/arm-mem.h
28 create mode 100644 arch/arm/lib/exports_rpi.c
29 create mode 100644 arch/arm/lib/memcmp_rpi.S
30 create mode 100644 arch/arm/lib/memcpy_rpi.S
31 create mode 100644 arch/arm/lib/memcpymove.h
32 create mode 100644 arch/arm/lib/memmove_rpi.S
33 create mode 100644 arch/arm/lib/memset_rpi.S
35 --- a/arch/arm/include/asm/string.h
36 +++ b/arch/arm/include/asm/string.h
37 @@ -24,6 +24,11 @@ extern void * memchr(const void *, int,
38 #define __HAVE_ARCH_MEMSET
39 extern void * memset(void *, int, __kernel_size_t);
41 +#ifdef CONFIG_MACH_BCM2708
42 +#define __HAVE_ARCH_MEMCMP
43 +extern int memcmp(const void *, const void *, size_t);
46 extern void __memzero(void *ptr, __kernel_size_t n);
48 #define memset(p,v,n) \
49 --- a/arch/arm/include/asm/uaccess.h
50 +++ b/arch/arm/include/asm/uaccess.h
51 @@ -493,6 +493,9 @@ do { \
52 extern unsigned long __must_check
53 arm_copy_from_user(void *to, const void __user *from, unsigned long n);
55 +extern unsigned long __must_check
56 +__copy_from_user_std(void *to, const void __user *from, unsigned long n);
58 static inline unsigned long __must_check
59 __copy_from_user(void *to, const void __user *from, unsigned long n)
61 --- a/arch/arm/lib/Makefile
62 +++ b/arch/arm/lib/Makefile
65 lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
66 csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
67 - delay.o delay-loop.o findbit.o memchr.o memcpy.o \
68 - memmove.o memset.o memzero.o setbit.o \
69 - strchr.o strrchr.o \
70 + delay.o delay-loop.o findbit.o memchr.o memzero.o \
71 + setbit.o strchr.o strrchr.o \
72 testchangebit.o testclearbit.o testsetbit.o \
73 ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
74 ucmpdi2.o lib1funcs.o div64.o \
75 @@ -18,6 +17,16 @@ lib-y := backtrace.o changebit.o csumip
76 mmu-y := clear_user.o copy_page.o getuser.o putuser.o \
77 copy_from_user.o copy_to_user.o
79 +# Choose optimised implementations for Raspberry Pi
80 +ifeq ($(CONFIG_MACH_BCM2708),y)
81 + CFLAGS_uaccess_with_memcpy.o += -DCOPY_FROM_USER_THRESHOLD=1600
82 + CFLAGS_uaccess_with_memcpy.o += -DCOPY_TO_USER_THRESHOLD=672
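+  # Copies below these byte counts take the LDM/STM-based
+  # __copy_{to,from}_user_std paths; larger ones use the page-pinning
+  # memcpy path in uaccess_with_memcpy.c.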
83 + obj-$(CONFIG_MODULES) += exports_rpi.o
84 + lib-y += memcpy_rpi.o memmove_rpi.o memset_rpi.o memcmp_rpi.o
86 + lib-y += memcpy.o memmove.o memset.o
89 # using lib_ here won't override already available weak symbols
90 obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
93 +++ b/arch/arm/lib/arm-mem.h
96 +Copyright (c) 2013, Raspberry Pi Foundation
97 +Copyright (c) 2013, RISC OS Open Ltd
100 +Redistribution and use in source and binary forms, with or without
101 +modification, are permitted provided that the following conditions are met:
102 + * Redistributions of source code must retain the above copyright
103 + notice, this list of conditions and the following disclaimer.
104 + * Redistributions in binary form must reproduce the above copyright
105 + notice, this list of conditions and the following disclaimer in the
106 + documentation and/or other materials provided with the distribution.
107 + * Neither the name of the copyright holder nor the
108 + names of its contributors may be used to endorse or promote products
109 + derived from this software without specific prior written permission.
111 +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
112 +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
113 +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
114 +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
115 +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
116 +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
117 +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
118 +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
119 +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
120 +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
129 +.macro preload_leading_step1 backwards, ptr, base
130 +/* If the destination is already 16-byte aligned, then we need to preload
131 + * between 0 and prefetch_distance (inclusive) cache lines ahead so there
132 + * are no gaps when the inner loop starts.
141 + .rept prefetch_distance+1
144 + .set OFFSET, OFFSET-32
146 + .set OFFSET, OFFSET+32
151 +.macro preload_leading_step2 backwards, ptr, base, leading_bytes, tmp
152 +/* However, if the destination is not 16-byte aligned, we may need to
153 + * preload one more cache line than that. The question we need to ask is:
154 + * are the leading bytes more than the amount by which the source
155 + * pointer will be rounded down for preloading, and if so, by how many
156 + * cache lines? */
159 +/* Here we compare against how many bytes we are into the
160 + * cache line, counting down from the highest such address.
161 + * Effectively, we want to calculate
162 + * leading_bytes = dst&15
163 + * cacheline_offset = 31-((src-leading_bytes-1)&31)
164 + * extra_needed = leading_bytes - cacheline_offset
165 + * and test if extra_needed is <= 0, or rearranging:
166 + * leading_bytes + ((src-leading_bytes-1)&31) <= 31
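+ * Worked example (assumed values): if dst&15 = 5 then leading_bytes = 5;
+ * with (src-5-1)&31 = 29 the sum is 34 > 31, so one extra cache line
+ * must be preloaded before the inner loop takes over.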
168 + mov tmp, base, lsl #32-5
169 + sbc tmp, tmp, leading_bytes, lsl #32-5
170 + adds tmp, tmp, leading_bytes, lsl #32-5
172 + pld [ptr, #-32*(prefetch_distance+1)]
174 +/* Effectively, we want to calculate
175 + * leading_bytes = (-dst)&15
176 + * cacheline_offset = (src+leading_bytes)&31
177 + * extra_needed = leading_bytes - cacheline_offset
178 + * and test if extra_needed is <= 0.
180 + mov tmp, base, lsl #32-5
181 + add tmp, tmp, leading_bytes, lsl #32-5
182 + rsbs tmp, tmp, leading_bytes, lsl #32-5
184 + pld [ptr, #32*(prefetch_distance+1)]
189 +.macro preload_trailing backwards, base, remain, tmp
190 + /* We need either 0, 1 or 2 extra preloads */
193 + mov tmp, tmp, lsl #32-5
195 + mov tmp, base, lsl #32-5
197 + adds tmp, tmp, remain, lsl #32-5
198 + adceqs tmp, tmp, #0
199 + /* The instruction above has two effects: ensures Z is only
200 + * set if C was clear (so Z indicates that both shifted quantities
201 + * were 0), and clears C if Z was set (so C indicates that the sum
202 +	 * of the shifted quantities was strictly greater than 32) */
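+	/* Concrete check (illustrative): with base&31 = 16 and remain&31 = 16,
+	 * the ADDS carries out (C=1) and leaves zero (Z=1); ADCEQS then
+	 * produces 1, clearing both Z and C - correct, as the sum is exactly
+	 * 32: neither "both zero" nor "greater than 32" holds. */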
212 + pld [tmp, #-32*(prefetch_distance+1)]
214 + pld [tmp, #-32*prefetch_distance]
216 + pld [tmp, #32*(prefetch_distance+2)]
218 + pld [tmp, #32*(prefetch_distance+1)]
223 +.macro preload_all backwards, narrow_case, shift, base, remain, tmp0, tmp1
226 + bic tmp0, tmp0, #31
228 + sub tmp1, base, remain, lsl #shift
230 + bic tmp0, base, #31
232 + add tmp1, base, remain, lsl #shift
235 + bic tmp1, tmp1, #31
239 + /* In this case, all the data fits in either 1 or 2 cache lines */
244 + sub tmp0, tmp0, #32
246 + add tmp0, tmp0, #32
254 --- a/arch/arm/lib/copy_from_user.S
255 +++ b/arch/arm/lib/copy_from_user.S
260 -ENTRY(arm_copy_from_user)
261 +ENTRY(__copy_from_user_std)
262 +WEAK(arm_copy_from_user)
264 #include "copy_template.S"
266 ENDPROC(arm_copy_from_user)
267 +ENDPROC(__copy_from_user_std)
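+/* WEAK() leaves arm_copy_from_user overridable: with
+ * CONFIG_UACCESS_WITH_MEMCPY enabled, the strong definition in
+ * uaccess_with_memcpy.c wins at link time, and this routine remains
+ * reachable as __copy_from_user_std.  A C-level sketch of the idea:
+ *
+ *   unsigned long __attribute__((weak))
+ *   arm_copy_from_user(void *to, const void __user *from, unsigned long n)
+ *   {
+ *           return __copy_from_user_std(to, from, n);
+ *   }
+ */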
269 .pushsection .fixup,"ax"
272 +++ b/arch/arm/lib/exports_rpi.c
275 + * Copyright (c) 2014, Raspberry Pi (Trading) Ltd.
277 + * Redistribution and use in source and binary forms, with or without
278 + * modification, are permitted provided that the following conditions
280 + * 1. Redistributions of source code must retain the above copyright
281 + * notice, this list of conditions, and the following disclaimer,
282 + * without modification.
283 + * 2. Redistributions in binary form must reproduce the above copyright
284 + * notice, this list of conditions and the following disclaimer in the
285 + * documentation and/or other materials provided with the distribution.
286 + * 3. The names of the above-listed copyright holders may not be used
287 + * to endorse or promote products derived from this software without
288 + * specific prior written permission.
290 + * ALTERNATIVELY, this software may be distributed under the terms of the
291 + * GNU General Public License ("GPL") version 2, as published by the Free
292 + * Software Foundation.
294 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
295 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
296 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
297 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
298 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
299 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
300 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
301 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
302 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
303 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
304 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
307 +#include <linux/kernel.h>
308 +#include <linux/module.h>
310 +EXPORT_SYMBOL(memcmp);
312 +++ b/arch/arm/lib/memcmp_rpi.S
315 +Copyright (c) 2013, Raspberry Pi Foundation
316 +Copyright (c) 2013, RISC OS Open Ltd
317 +All rights reserved.
319 +Redistribution and use in source and binary forms, with or without
320 +modification, are permitted provided that the following conditions are met:
321 + * Redistributions of source code must retain the above copyright
322 + notice, this list of conditions and the following disclaimer.
323 + * Redistributions in binary form must reproduce the above copyright
324 + notice, this list of conditions and the following disclaimer in the
325 + documentation and/or other materials provided with the distribution.
326 + * Neither the name of the copyright holder nor the
327 + names of its contributors may be used to endorse or promote products
328 + derived from this software without specific prior written permission.
330 +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
331 +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
332 +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
333 +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
334 +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
335 +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
336 +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
337 +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
338 +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
339 +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
342 +#include <linux/linkage.h>
343 +#include "arm-mem.h"
345 +/* Prevent the stack from becoming executable */
346 +#if defined(__linux__) && defined(__ELF__)
347 +.section .note.GNU-stack,"",%progbits
357 +.macro memcmp_process_head unaligned
359 + ldr DAT0, [S_1], #4
360 + ldr DAT1, [S_1], #4
361 + ldr DAT2, [S_1], #4
362 + ldr DAT3, [S_1], #4
364 + ldmia S_1!, {DAT0, DAT1, DAT2, DAT3}
366 + ldmia S_2!, {DAT4, DAT5, DAT6, DAT7}
369 +.macro memcmp_process_tail
377 +.macro memcmp_leading_31bytes
378 + movs DAT0, OFF, lsl #31
379 + ldrmib DAT0, [S_1], #1
380 + ldrcsh DAT1, [S_1], #2
381 + ldrmib DAT4, [S_2], #1
382 + ldrcsh DAT5, [S_2], #2
392 + movs DAT0, OFF, lsl #29
393 + ldrmi DAT0, [S_1], #4
394 + ldrcs DAT1, [S_1], #4
395 + ldrcs DAT2, [S_1], #4
396 + ldrmi DAT4, [S_2], #4
397 + ldmcsia S_2!, {DAT5, DAT6}
412 + memcmp_process_head 1
414 + memcmp_process_tail
418 +.macro memcmp_trailing_15bytes unaligned
421 + ldrcs DAT0, [S_1], #4
422 + ldrcs DAT1, [S_1], #4
424 + ldmcsia S_1!, {DAT0, DAT1}
426 + ldrmi DAT2, [S_1], #4
427 + ldmcsia S_2!, {DAT4, DAT5}
428 + ldrmi DAT6, [S_2], #4
440 + ldrcsh DAT0, [S_1], #2
442 + ldrcsh DAT4, [S_2], #2
453 +.macro memcmp_long_inner_loop unaligned
455 + memcmp_process_head unaligned
456 + pld [S_2, #prefetch_distance*32 + 16]
457 + memcmp_process_tail
458 + memcmp_process_head unaligned
460 + memcmp_process_tail
463 + /* Just before the final (prefetch_distance+1) 32-byte blocks,
464 + * deal with final preloads */
465 + preload_trailing 0, S_1, N, DAT0
466 + preload_trailing 0, S_2, N, DAT0
467 + add N, N, #(prefetch_distance+2)*32 - 16
469 + memcmp_process_head unaligned
470 + memcmp_process_tail
473 + /* Trailing words and bytes */
476 + memcmp_trailing_15bytes unaligned
477 +199: /* Reached end without detecting a difference */
480 + pop {DAT1-DAT6, pc}
483 +.macro memcmp_short_inner_loop unaligned
484 + subs N, N, #16 /* simplifies inner loop termination */
487 + memcmp_process_head unaligned
488 + memcmp_process_tail
491 +122: /* Trailing words and bytes */
494 + memcmp_trailing_15bytes unaligned
495 +199: /* Reached end without detecting a difference */
498 + pop {DAT1-DAT6, pc}
502 + * int memcmp(const void *s1, const void *s2, size_t n);
504 + * a1 = pointer to buffer 1
505 + * a2 = pointer to buffer 2
506 + * a3 = number of bytes to compare (as unsigned chars)
508 + * a1 = >0/=0/<0 if s1 >/=/< s2
511 +.set prefetch_distance, 2
527 + push {DAT1-DAT6, lr}
528 + setend be /* lowest-addressed bytes are most significant */
530 + /* To preload ahead as we go, we need at least (prefetch_distance+2) 32-byte blocks */
531 + cmp N, #(prefetch_distance+3)*32 - 1
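+	/* With prefetch_distance = 2 this compares against 159, so the
+	 * long (preloading) path is taken for N >= 160 */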
535 + /* Adjust N so that the decrement instruction can also test for
536 + * inner loop termination. We want it to stop when there are
537 + * (prefetch_distance+1) complete blocks to go. */
538 + sub N, N, #(prefetch_distance+2)*32
539 + preload_leading_step1 0, DAT0, S_1
540 + preload_leading_step1 0, DAT1, S_2
543 + rsb OFF, S_2, #0 /* no need to AND with 15 here */
544 + preload_leading_step2 0, DAT0, S_1, OFF, DAT2
545 + preload_leading_step2 0, DAT1, S_2, OFF, DAT2
546 + memcmp_leading_31bytes
547 +154: /* Second source now cacheline (32-byte) aligned; we have at
548 + * least one prefetch to go. */
549 + /* Prefetch offset is best selected such that it lies in the
550 +	 * first 8 of each 32 bytes - but it's just as easy to aim for
551 +	 * the first one */
553 + rsb OFF, OFF, #32*prefetch_distance
556 + memcmp_long_inner_loop 0
557 +140: memcmp_long_inner_loop 1
559 +170: /* Short case */
562 + preload_all 0, 0, 0, S_1, N, DAT0, DAT1
563 + preload_all 0, 0, 0, S_2, N, DAT0, DAT1
568 + ldrb DAT0, [S_1], #1
569 + ldrb DAT4, [S_2], #1
574 +174: /* Second source now 4-byte aligned; we have 0 or more bytes to go */
577 + memcmp_short_inner_loop 0
578 +140: memcmp_short_inner_loop 1
580 +200: /* Difference found: determine sign. */
584 + pop {DAT1-DAT6, pc}
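+/* Why the routine runs with "setend be" (see above): loaded this way,
+ * the lowest-addressed byte is most significant, so two aligned words
+ * compare as unsigned integers in exactly the order memcmp assigns to
+ * their bytes.  Illustrative C equivalent for one word:
+ *
+ *   uint32_t a = ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+ *   uint32_t b = ((uint32_t)q[0] << 24) | (q[1] << 16) | (q[2] << 8) | q[3];
+ *   return (a > b) - (a < b);    // same sign as memcmp(p, q, 4)
+ */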
600 +++ b/arch/arm/lib/memcpy_rpi.S
603 +Copyright (c) 2013, Raspberry Pi Foundation
604 +Copyright (c) 2013, RISC OS Open Ltd
605 +All rights reserved.
607 +Redistribution and use in source and binary forms, with or without
608 +modification, are permitted provided that the following conditions are met:
609 + * Redistributions of source code must retain the above copyright
610 + notice, this list of conditions and the following disclaimer.
611 + * Redistributions in binary form must reproduce the above copyright
612 + notice, this list of conditions and the following disclaimer in the
613 + documentation and/or other materials provided with the distribution.
614 + * Neither the name of the copyright holder nor the
615 + names of its contributors may be used to endorse or promote products
616 + derived from this software without specific prior written permission.
618 +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
619 +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
620 +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
621 +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
622 +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
623 +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
624 +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
625 +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
626 +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
627 +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
630 +#include <linux/linkage.h>
631 +#include "arm-mem.h"
632 +#include "memcpymove.h"
634 +/* Prevent the stack from becoming executable */
635 +#if defined(__linux__) && defined(__ELF__)
636 +.section .note.GNU-stack,"",%progbits
647 + * void *memcpy(void * restrict s1, const void * restrict s2, size_t n);
649 + * a1 = pointer to destination
650 + * a2 = pointer to source
651 + * a3 = number of bytes to copy
656 +.set prefetch_distance, 3
664 +++ b/arch/arm/lib/memcpymove.h
667 +Copyright (c) 2013, Raspberry Pi Foundation
668 +Copyright (c) 2013, RISC OS Open Ltd
669 +All rights reserved.
671 +Redistribution and use in source and binary forms, with or without
672 +modification, are permitted provided that the following conditions are met:
673 + * Redistributions of source code must retain the above copyright
674 + notice, this list of conditions and the following disclaimer.
675 + * Redistributions in binary form must reproduce the above copyright
676 + notice, this list of conditions and the following disclaimer in the
677 + documentation and/or other materials provided with the distribution.
678 + * Neither the name of the copyright holder nor the
679 + names of its contributors may be used to endorse or promote products
680 + derived from this software without specific prior written permission.
682 +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
683 +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
684 +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
685 +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
686 +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
687 +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
688 +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
689 +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
690 +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
691 +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
694 +.macro unaligned_words backwards, align, use_pld, words, r0, r1, r2, r3, r4, r5, r6, r7, r8
697 + mov r1, r0, lsl #32-align*8
699 + orr r1, r1, r0, lsr #align*8
702 + mov r0, r1, lsr #align*8
704 + orr r0, r0, r1, lsl #32-align*8
710 + mov r2, r0, lsl #32-align*8
712 + orr r2, r2, r1, lsr #align*8
713 + mov r1, r1, lsl #32-align*8
714 + orr r1, r1, r0, lsr #align*8
718 + mov r0, r2, lsr #align*8
720 + orr r0, r0, r1, lsl #32-align*8
721 + mov r1, r1, lsr #align*8
722 + orr r1, r1, r2, lsl #32-align*8
728 + mov r4, r0, lsl #32-align*8
730 + orr r4, r4, r3, lsr #align*8
731 + mov r3, r3, lsl #32-align*8
732 + orr r3, r3, r2, lsr #align*8
733 + mov r2, r2, lsl #32-align*8
734 + orr r2, r2, r1, lsr #align*8
735 + mov r1, r1, lsl #32-align*8
736 + orr r1, r1, r0, lsr #align*8
737 + stmdb D!, {r1, r2, r3, r4}
740 + mov r0, r4, lsr #align*8
742 + orr r0, r0, r1, lsl #32-align*8
743 + mov r1, r1, lsr #align*8
744 + orr r1, r1, r2, lsl #32-align*8
745 + mov r2, r2, lsr #align*8
746 + orr r2, r2, r3, lsl #32-align*8
747 + mov r3, r3, lsr #align*8
748 + orr r3, r3, r4, lsl #32-align*8
749 + stmia D!, {r0, r1, r2, r3}
753 + ldmdb S!, {r4, r5, r6, r7}
754 + mov r8, r0, lsl #32-align*8
755 + ldmdb S!, {r0, r1, r2, r3}
759 + orr r8, r8, r7, lsr #align*8
760 + mov r7, r7, lsl #32-align*8
761 + orr r7, r7, r6, lsr #align*8
762 + mov r6, r6, lsl #32-align*8
763 + orr r6, r6, r5, lsr #align*8
764 + mov r5, r5, lsl #32-align*8
765 + orr r5, r5, r4, lsr #align*8
766 + mov r4, r4, lsl #32-align*8
767 + orr r4, r4, r3, lsr #align*8
768 + mov r3, r3, lsl #32-align*8
769 + orr r3, r3, r2, lsr #align*8
770 + mov r2, r2, lsl #32-align*8
771 + orr r2, r2, r1, lsr #align*8
772 + mov r1, r1, lsl #32-align*8
773 + orr r1, r1, r0, lsr #align*8
774 + stmdb D!, {r5, r6, r7, r8}
775 + stmdb D!, {r1, r2, r3, r4}
777 + ldmib S!, {r1, r2, r3, r4}
778 + mov r0, r8, lsr #align*8
779 + ldmib S!, {r5, r6, r7, r8}
783 + orr r0, r0, r1, lsl #32-align*8
784 + mov r1, r1, lsr #align*8
785 + orr r1, r1, r2, lsl #32-align*8
786 + mov r2, r2, lsr #align*8
787 + orr r2, r2, r3, lsl #32-align*8
788 + mov r3, r3, lsr #align*8
789 + orr r3, r3, r4, lsl #32-align*8
790 + mov r4, r4, lsr #align*8
791 + orr r4, r4, r5, lsl #32-align*8
792 + mov r5, r5, lsr #align*8
793 + orr r5, r5, r6, lsl #32-align*8
794 + mov r6, r6, lsr #align*8
795 + orr r6, r6, r7, lsl #32-align*8
796 + mov r7, r7, lsr #align*8
797 + orr r7, r7, r8, lsl #32-align*8
798 + stmia D!, {r0, r1, r2, r3}
799 + stmia D!, {r4, r5, r6, r7}
804 +.macro memcpy_leading_15bytes backwards, align
805 + movs DAT1, DAT2, lsl #31
808 + ldrmib DAT0, [S, #-1]!
809 + ldrcsh DAT1, [S, #-2]!
810 + strmib DAT0, [D, #-1]!
811 + strcsh DAT1, [D, #-2]!
813 + ldrmib DAT0, [S], #1
814 + ldrcsh DAT1, [S], #2
815 + strmib DAT0, [D], #1
816 + strcsh DAT1, [D], #2
818 + movs DAT1, DAT2, lsl #29
820 + ldrmi DAT0, [S, #-4]!
822 + ldmcsdb S!, {DAT1, DAT2}
824 + ldrcs DAT2, [S, #-4]!
825 + ldrcs DAT1, [S, #-4]!
827 + strmi DAT0, [D, #-4]!
828 + stmcsdb D!, {DAT1, DAT2}
830 + ldrmi DAT0, [S], #4
832 + ldmcsia S!, {DAT1, DAT2}
834 + ldrcs DAT1, [S], #4
835 + ldrcs DAT2, [S], #4
837 + strmi DAT0, [D], #4
838 + stmcsia D!, {DAT1, DAT2}
842 +.macro memcpy_trailing_15bytes backwards, align
846 + ldmcsdb S!, {DAT0, DAT1}
848 + ldrcs DAT1, [S, #-4]!
849 + ldrcs DAT0, [S, #-4]!
851 + ldrmi DAT2, [S, #-4]!
852 + stmcsdb D!, {DAT0, DAT1}
853 + strmi DAT2, [D, #-4]!
856 + ldmcsia S!, {DAT0, DAT1}
858 + ldrcs DAT0, [S], #4
859 + ldrcs DAT1, [S], #4
861 + ldrmi DAT2, [S], #4
862 + stmcsia D!, {DAT0, DAT1}
863 + strmi DAT2, [D], #4
867 + ldrcsh DAT0, [S, #-2]!
868 + ldrmib DAT1, [S, #-1]
869 + strcsh DAT0, [D, #-2]!
870 + strmib DAT1, [D, #-1]
872 + ldrcsh DAT0, [S], #2
874 + strcsh DAT0, [D], #2
879 +.macro memcpy_long_inner_loop backwards, align
882 + ldr DAT0, [S, #-align]!
884 + ldr LAST, [S, #-align]!
890 + ldmdb S!, {DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, LAST}
892 + stmdb D!, {DAT4, DAT5, DAT6, LAST}
893 + stmdb D!, {DAT0, DAT1, DAT2, DAT3}
895 + ldmia S!, {DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, LAST}
897 + stmia D!, {DAT0, DAT1, DAT2, DAT3}
898 + stmia D!, {DAT4, DAT5, DAT6, LAST}
901 + unaligned_words backwards, align, 1, 8, DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, DAT7, LAST
905 + /* Just before the final (prefetch_distance+1) 32-byte blocks, deal with final preloads */
906 + preload_trailing backwards, S, N, OFF
907 + add N, N, #(prefetch_distance+2)*32 - 32
911 + ldmdb S!, {DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, LAST}
912 + stmdb D!, {DAT4, DAT5, DAT6, LAST}
913 + stmdb D!, {DAT0, DAT1, DAT2, DAT3}
915 + ldmia S!, {DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, LAST}
916 + stmia D!, {DAT0, DAT1, DAT2, DAT3}
917 + stmia D!, {DAT4, DAT5, DAT6, LAST}
920 + unaligned_words backwards, align, 0, 8, DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, DAT7, LAST
927 + ldmnedb S!, {DAT0, DAT1, DAT2, LAST}
928 + stmnedb D!, {DAT0, DAT1, DAT2, LAST}
930 + ldmneia S!, {DAT0, DAT1, DAT2, LAST}
931 + stmneia D!, {DAT0, DAT1, DAT2, LAST}
935 + unaligned_words backwards, align, 0, 4, DAT0, DAT1, DAT2, DAT3, LAST
938 + /* Trailing words and bytes */
944 + memcpy_trailing_15bytes backwards, align
946 + pop {DAT3, DAT4, DAT5, DAT6, DAT7}
947 + pop {D, DAT1, DAT2, pc}
950 +.macro memcpy_medium_inner_loop backwards, align
954 + ldmdb S!, {DAT0, DAT1, DAT2, LAST}
956 + ldr LAST, [S, #-4]!
957 + ldr DAT2, [S, #-4]!
958 + ldr DAT1, [S, #-4]!
959 + ldr DAT0, [S, #-4]!
961 + stmdb D!, {DAT0, DAT1, DAT2, LAST}
964 + ldmia S!, {DAT0, DAT1, DAT2, LAST}
971 + stmia D!, {DAT0, DAT1, DAT2, LAST}
975 + /* Trailing words and bytes */
978 + memcpy_trailing_15bytes backwards, align
980 + pop {D, DAT1, DAT2, pc}
983 +.macro memcpy_short_inner_loop backwards, align
987 + ldmnedb S!, {DAT0, DAT1, DAT2, LAST}
989 + ldrne LAST, [S, #-4]!
990 + ldrne DAT2, [S, #-4]!
991 + ldrne DAT1, [S, #-4]!
992 + ldrne DAT0, [S, #-4]!
994 + stmnedb D!, {DAT0, DAT1, DAT2, LAST}
997 + ldmneia S!, {DAT0, DAT1, DAT2, LAST}
999 + ldrne DAT0, [S], #4
1000 + ldrne DAT1, [S], #4
1001 + ldrne DAT2, [S], #4
1002 + ldrne LAST, [S], #4
1004 + stmneia D!, {DAT0, DAT1, DAT2, LAST}
1006 + memcpy_trailing_15bytes backwards, align
1008 + pop {D, DAT1, DAT2, pc}
1011 +.macro memcpy backwards
1028 + push {D, DAT1, DAT2, lr}
1030 + .cfi_def_cfa_offset 16
1031 + .cfi_rel_offset D, 0
1034 + .cfi_undefined DAT0
1035 + .cfi_rel_offset DAT1, 4
1036 + .cfi_rel_offset DAT2, 8
1037 + .cfi_undefined LAST
1038 + .cfi_rel_offset lr, 12
1045 + /* See if we're guaranteed to have at least one 16-byte aligned 16-byte write */
1048 + /* To preload ahead as we go, we need at least (prefetch_distance+2) 32-byte blocks */
1049 + cmp N, #(prefetch_distance+3)*32 - 1
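+	/* With prefetch_distance = 3 (as set by memcpy_rpi.S and
+	 * memmove_rpi.S) this compares against 191, so the long path
+	 * is used for N >= 192 */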
1053 + push {DAT3, DAT4, DAT5, DAT6, DAT7}
1055 + .cfi_def_cfa_offset 36
1056 + .cfi_rel_offset D, 20
1057 + .cfi_rel_offset DAT1, 24
1058 + .cfi_rel_offset DAT2, 28
1059 + .cfi_rel_offset DAT3, 0
1060 + .cfi_rel_offset DAT4, 4
1061 + .cfi_rel_offset DAT5, 8
1062 + .cfi_rel_offset DAT6, 12
1063 + .cfi_rel_offset DAT7, 16
1064 + .cfi_rel_offset lr, 32
1066 + /* Adjust N so that the decrement instruction can also test for
1067 + * inner loop termination. We want it to stop when there are
1068 + * (prefetch_distance+1) complete blocks to go. */
1069 + sub N, N, #(prefetch_distance+2)*32
1070 + preload_leading_step1 backwards, DAT0, S
1072 + /* Bug in GAS: it accepts, but mis-assembles the instruction
1073 + * ands DAT2, D, #60, 2
1074 + * which sets DAT2 to the number of leading bytes until destination is aligned and also clears C (sets borrow)
1081 + rsb DAT2, DAT2, #16 /* number of leading bytes until destination aligned */
1083 + preload_leading_step2 backwards, DAT0, S, DAT2, OFF
1084 + memcpy_leading_15bytes backwards, 1
1085 +154: /* Destination now 16-byte aligned; we have at least one prefetch as well as at least one 16-byte output block */
1086 + /* Prefetch offset is best selected such that it lies in the first 8 of each 32 bytes - but it's just as easy to aim for the first one */
1090 + sub OFF, OFF, #32*(prefetch_distance+1)
1093 + rsb OFF, OFF, #32*prefetch_distance
1095 + movs DAT0, S, lsl #31
1099 + memcpy_long_inner_loop backwards, 0
1100 +155: memcpy_long_inner_loop backwards, 1
1101 +156: memcpy_long_inner_loop backwards, 2
1102 +157: memcpy_long_inner_loop backwards, 3
1104 + .cfi_def_cfa_offset 16
1105 + .cfi_rel_offset D, 0
1106 + .cfi_rel_offset DAT1, 4
1107 + .cfi_rel_offset DAT2, 8
1108 + .cfi_same_value DAT3
1109 + .cfi_same_value DAT4
1110 + .cfi_same_value DAT5
1111 + .cfi_same_value DAT6
1112 + .cfi_same_value DAT7
1113 + .cfi_rel_offset lr, 12
1115 +160: /* Medium case */
1116 + preload_all backwards, 0, 0, S, N, DAT2, OFF
1117 + sub N, N, #16 /* simplifies inner loop termination */
1124 + rsb DAT2, DAT2, #16
1126 + memcpy_leading_15bytes backwards, align
1127 +164: /* Destination now 16-byte aligned; we have at least one 16-byte output block */
1130 + memcpy_medium_inner_loop backwards, 0
1131 +140: memcpy_medium_inner_loop backwards, 1
1133 +170: /* Short case, less than 31 bytes, so no guarantee of at least one 16-byte block */
1136 + preload_all backwards, 1, 0, S, N, DAT2, LAST
1142 + ldrb DAT0, [S, #-1]!
1143 + strb DAT0, [D, #-1]!
1145 + ldrb DAT0, [S], #1
1146 + strb DAT0, [D], #1
1150 +174: /* Destination now 4-byte aligned; we have 0 or more output bytes to go */
1153 + memcpy_short_inner_loop backwards, 0
1154 +140: memcpy_short_inner_loop backwards, 1
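+/* Size classes chosen above, summarised (pd = prefetch_distance):
+ *
+ *   n >= (pd+3)*32  long:   software-pipelined preloads, 32-byte blocks
+ *   n >= 31         medium: preload everything up front, 16-byte blocks
+ *   otherwise       short:  byte/word copies; no aligned 16-byte block
+ *                           is guaranteed to exist
+ */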
1173 +++ b/arch/arm/lib/memmove_rpi.S
1176 +Copyright (c) 2013, Raspberry Pi Foundation
1177 +Copyright (c) 2013, RISC OS Open Ltd
1178 +All rights reserved.
1180 +Redistribution and use in source and binary forms, with or without
1181 +modification, are permitted provided that the following conditions are met:
1182 + * Redistributions of source code must retain the above copyright
1183 + notice, this list of conditions and the following disclaimer.
1184 + * Redistributions in binary form must reproduce the above copyright
1185 + notice, this list of conditions and the following disclaimer in the
1186 + documentation and/or other materials provided with the distribution.
1187 + * Neither the name of the copyright holder nor the
1188 + names of its contributors may be used to endorse or promote products
1189 + derived from this software without specific prior written permission.
1191 +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
1192 +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1193 +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1194 +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
1195 +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1196 +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1197 +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1198 +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1199 +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1200 +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1203 +#include <linux/linkage.h>
1204 +#include "arm-mem.h"
1205 +#include "memcpymove.h"
1207 +/* Prevent the stack from becoming executable */
1208 +#if defined(__linux__) && defined(__ELF__)
1209 +.section .note.GNU-stack,"",%progbits
1214 + .object_arch armv4
1220 + * void *memmove(void *s1, const void *s2, size_t n);
1222 + * a1 = pointer to destination
1223 + * a2 = pointer to source
1224 + * a3 = number of bytes to copy
1229 +.set prefetch_distance, 3
1233 +	bpl	memcpy /* pl works even across the -1 to 0 and 0x7fffffff to 0x80000000 wrap points */
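+	/* In C terms (sketch; operand order of the elided cmp assumed):
+	 * if (src >= dst), taken as a signed difference, a forward copy
+	 * cannot overwrite unread source bytes, so memcpy is safe; only
+	 * dst > src falls through to the backwards copier below. */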
1237 +++ b/arch/arm/lib/memset_rpi.S
1240 +Copyright (c) 2013, Raspberry Pi Foundation
1241 +Copyright (c) 2013, RISC OS Open Ltd
1242 +All rights reserved.
1244 +Redistribution and use in source and binary forms, with or without
1245 +modification, are permitted provided that the following conditions are met:
1246 + * Redistributions of source code must retain the above copyright
1247 + notice, this list of conditions and the following disclaimer.
1248 + * Redistributions in binary form must reproduce the above copyright
1249 + notice, this list of conditions and the following disclaimer in the
1250 + documentation and/or other materials provided with the distribution.
1251 + * Neither the name of the copyright holder nor the
1252 + names of its contributors may be used to endorse or promote products
1253 + derived from this software without specific prior written permission.
1255 +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
1256 +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1257 +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1258 +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
1259 +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1260 +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1261 +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1262 +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1263 +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1264 +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1267 +#include <linux/linkage.h>
1268 +#include "arm-mem.h"
1270 +/* Prevent the stack from becoming executable */
1271 +#if defined(__linux__) && defined(__ELF__)
1272 +.section .note.GNU-stack,"",%progbits
1277 + .object_arch armv4
1283 + * void *memset(void *s, int c, size_t n);
1285 + * a1 = pointer to buffer to fill
1286 + * a2 = byte value to fill with (only the low 8 bits are used; assumed
1287 + *      already narrowed to a byte by the caller)
1287 + * a3 = number of bytes to fill
1300 + orr DAT0, DAT0, lsl #8
1302 + orr DAT0, DAT0, lsl #16
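+	/* DAT0 now holds the fill byte replicated into all four lanes,
+	 * e.g. 0x000000cc -> 0x0000cccc -> 0xcccccccc */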
1305 + /* See if we're guaranteed to have at least one 16-byte aligned 16-byte write */
1309 +161: sub N, N, #16 /* simplifies inner loop termination */
1310 + /* Leading words and bytes */
1313 + rsb DAT3, S, #0 /* bits 0-3 = number of leading bytes until aligned */
1314 + movs DAT2, DAT3, lsl #31
1316 + strmib DAT0, [S], #1
1318 + strcsh DAT0, [S], #2
1319 + movs DAT2, DAT3, lsl #29
1321 + strmi DAT0, [S], #4
1323 + stmcsia S!, {DAT0, DAT1}
1324 +164: /* Set-up of DAT2 and DAT3 is delayed here so that they could be used as scratch registers above */
1327 + /* Now the inner loop of 16-byte stores */
1328 +165: stmia S!, {DAT0, DAT1, DAT2, DAT3}
1331 +166: /* Trailing words and bytes */
1332 + movs N, N, lsl #29
1333 + stmcsia S!, {DAT0, DAT1}
1334 + strmi DAT0, [S], #4
1336 + strcsh DAT0, [S], #2
1340 +170: /* Short case */
1347 + strb DAT0, [S], #1
1351 + stmneia S!, {DAT0, DAT1, DAT2, DAT3}
1362 --- a/arch/arm/lib/uaccess_with_memcpy.c
1363 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1365 #include <asm/current.h>
1366 #include <asm/page.h>
1368 +#ifndef COPY_FROM_USER_THRESHOLD
1369 +#define COPY_FROM_USER_THRESHOLD 64
1372 +#ifndef COPY_TO_USER_THRESHOLD
1373 +#define COPY_TO_USER_THRESHOLD 64
1377 pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
1379 @@ -85,7 +93,44 @@ pin_page_for_write(const void __user *_a
1383 -static unsigned long noinline
1385 +pin_page_for_read(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
1387 + unsigned long addr = (unsigned long)_addr;
1394 + pgd = pgd_offset(current->mm, addr);
1395 + if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
1399 + pud = pud_offset(pgd, addr);
1400 + if (unlikely(pud_none(*pud) || pud_bad(*pud)))
1405 + pmd = pmd_offset(pud, addr);
1406 + if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
1409 + pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
1410 + if (unlikely(!pte_present(*pte) || !pte_young(*pte))) {
1411 + pte_unmap_unlock(pte, ptl);
1421 +unsigned long noinline
1422 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
1424 unsigned long ua_flags;
1425 @@ -138,6 +183,54 @@ out:
1429 +unsigned long noinline
1430 +__copy_from_user_memcpy(void *to, const void __user *from, unsigned long n)
1434 + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
1435 + memcpy(to, (const void *)from, n);
1439 + /* the mmap semaphore is taken only if not in an atomic context */
1440 + atomic = in_atomic();
1443 +	down_read(&current->mm->mmap_sem);
1449 + while (!pin_page_for_read(from, &pte, &ptl)) {
1452 +		up_read(&current->mm->mmap_sem);
1453 + if (__get_user(temp, (char __user *)from))
1456 +		down_read(&current->mm->mmap_sem);
1459 + tocopy = (~(unsigned long)from & ~PAGE_MASK) + 1;
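+		/* bytes left in this page, e.g. with PAGE_SIZE 4096 and
+		 * from ending in 0xff0: (~from & ~PAGE_MASK) + 1 = 0xf + 1 = 16 */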
1463 + memcpy(to, (const void *)from, tocopy);
1468 + pte_unmap_unlock(pte, ptl);
1471 +	up_read(&current->mm->mmap_sem);
1478 arm_copy_to_user(void __user *to, const void *from, unsigned long n)
1480 @@ -148,7 +241,7 @@ arm_copy_to_user(void __user *to, const
1481 * With frame pointer disabled, tail call optimization kicks in
1482 * as well making this test almost invisible.
1485 + if (n < COPY_TO_USER_THRESHOLD) {
1486 unsigned long ua_flags = uaccess_save_and_enable();
1487 n = __copy_to_user_std(to, from, n);
1488 uaccess_restore(ua_flags);
1489 @@ -157,6 +250,21 @@ arm_copy_to_user(void __user *to, const
1494 +unsigned long __must_check
1495 +arm_copy_from_user(void *to, const void __user *from, unsigned long n)
1498 + * This test is stubbed out of the main function above to keep
1499 + * the overhead for small copies low by avoiding a large
1500 + * register dump on the stack just to reload them right away.
1501 + * With frame pointer disabled, tail call optimization kicks in
1502 + * as well making this test almost invisible.
1504 + if (n < COPY_FROM_USER_THRESHOLD)
1505 + return __copy_from_user_std(to, from, n);
1506 + return __copy_from_user_memcpy(to, from, n);
1509 static unsigned long noinline
1510 __clear_user_memset(void __user *addr, unsigned long n)