target/linux/generic/backport-5.4/041-v5.5-arm64-Implement-optimised-checksum-routine.patch
From: Robin Murphy <robin.murphy@arm.com>
Date: Wed, 15 Jan 2020 16:42:39 +0000
Subject: [PATCH] arm64: Implement optimised checksum routine

Apparently there exist certain workloads which rely heavily on software
checksumming, for which the generic do_csum() implementation becomes a
significant bottleneck. Therefore let's give arm64 its own optimised
version - for ease of maintenance this foregoes assembly or intrinsics,
and is thus not actually arm64-specific, but does rely heavily on C
idioms that translate well to the A64 ISA and the typical load/store
capabilities of most ARMv8 CPU cores.

The resulting increase in checksum throughput scales nicely with buffer
size, tending towards 4x for a small in-order core (Cortex-A53), and up
to 6x or more for an aggressive big core (Ampere eMAG).

Reported-by: Lingyan Huang <huanglingyan2@huawei.com>
Tested-by: Lingyan Huang <huanglingyan2@huawei.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 create mode 100644 arch/arm64/lib/csum.c

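Note for reviewers (commentary below the diffstat, ignored when the patch is
applied): the checksum.h hunk below works by defining do_csum before
asm-generic/checksum.h gets pulled in. The generic lib/checksum.c only
compiles its portable do_csum() fallback when the architecture has not
provided one, while csum_partial() stays generic and calls whichever
do_csum() ends up defined. A simplified sketch of that consumer side, not
quoted verbatim from lib/checksum.c and not part of this patch:

  /* Sketch of the generic side in lib/checksum.c (simplified) */
  #ifndef do_csum
  /*
   * Stand-in for the portable fallback: 16-bit words, end-around carry.
   * (Little-endian, aligned start shown; the real version also handles
   * odd start addresses and big-endian.)
   */
  static unsigned int do_csum(const unsigned char *buff, int len)
  {
          unsigned int sum = 0;
          int i;

          for (i = 0; i + 1 < len; i += 2)
                  sum += buff[i] | (buff[i + 1] << 8);
          if (len & 1)
                  sum += buff[len - 1];
          while (sum >> 16)
                  sum = (sum & 0xffff) + (sum >> 16);
          return sum;
  }
  #endif

  /* csum_partial() folds the caller's running sum into do_csum()'s result */
  __wsum csum_partial(const void *buff, int len, __wsum wsum)
  {
          unsigned int sum = (__force unsigned int)wsum;
          unsigned int result = do_csum(buff, len);

          /* add in old sum, and carry.. */
          result += sum;
          if (sum > result)
                  result += 1;
          return (__force __wsum)result;
  }
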
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -36,6 +36,9 @@ static inline __sum16 ip_fast_csum(const
 }
 #define ip_fast_csum ip_fast_csum

+extern unsigned int do_csum(const unsigned char *buff, int len);
+#define do_csum do_csum
+
 #include <asm-generic/checksum.h>

 #endif /* __ASM_CHECKSUM_H */
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 lib-y := clear_user.o delay.o copy_from_user.o \
          copy_to_user.o copy_in_user.o copy_page.o \
-         clear_page.o memchr.o memcpy.o memmove.o memset.o \
-         memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
-         strchr.o strrchr.o tishift.o
+         clear_page.o csum.o memchr.o memcpy.o memmove.o \
+         memset.o memcmp.o strcmp.o strncmp.o strlen.o \
+         strnlen.o strchr.o strrchr.o tishift.o

 ifeq ($(CONFIG_KERNEL_MODE_NEON), y)
 obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
--- /dev/null
+++ b/arch/arm64/lib/csum.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 Arm Ltd.
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <net/checksum.h>
+
+/* Looks dumb, but generates nice-ish code */
+static u64 accumulate(u64 sum, u64 data)
+{
+        __uint128_t tmp = (__uint128_t)sum + data;
+        return tmp + (tmp >> 64);
+}
+
+unsigned int do_csum(const unsigned char *buff, int len)
+{
+        unsigned int offset, shift, sum;
+        const u64 *ptr;
+        u64 data, sum64 = 0;
+
+        offset = (unsigned long)buff & 7;
+        /*
+         * This is to all intents and purposes safe, since rounding down cannot
+         * result in a different page or cache line being accessed, and @buff
+         * should absolutely not be pointing to anything read-sensitive. We do,
+         * however, have to be careful not to piss off KASAN, which means using
+         * unchecked reads to accommodate the head and tail, for which we'll
+         * compensate with an explicit check up-front.
+         */
+        kasan_check_read(buff, len);
+        ptr = (u64 *)(buff - offset);
+        len = len + offset - 8;
+
+        /*
+         * Head: zero out any excess leading bytes. Shifting back by the same
+         * amount should be at least as fast as any other way of handling the
+         * odd/even alignment, and means we can ignore it until the very end.
+         */
+        shift = offset * 8;
+        data = READ_ONCE_NOCHECK(*ptr++);
+#ifdef __LITTLE_ENDIAN
+        data = (data >> shift) << shift;
+#else
+        data = (data << shift) >> shift;
+#endif
+
+        /*
+         * Body: straightforward aligned loads from here on (the paired loads
+         * underlying the quadword type still only need dword alignment). The
+         * main loop strictly excludes the tail, so the second loop will always
+         * run at least once.
+         */
+        while (unlikely(len > 64)) {
+                __uint128_t tmp1, tmp2, tmp3, tmp4;
+
+                tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+                tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
+                tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
+                tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+
+                len -= 64;
+                ptr += 8;
+
+                /* This is the "don't dump the carry flag into a GPR" idiom */
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                tmp2 += (tmp2 >> 64) | (tmp2 << 64);
+                tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+                tmp4 += (tmp4 >> 64) | (tmp4 << 64);
+                tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64);
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64);
+                tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+                tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64);
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                tmp1 = ((tmp1 >> 64) << 64) | sum64;
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                sum64 = tmp1 >> 64;
+        }
+        while (len > 8) {
+                __uint128_t tmp;
+
+                sum64 = accumulate(sum64, data);
+                tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+
+                len -= 16;
+                ptr += 2;
+
+#ifdef __LITTLE_ENDIAN
+                data = tmp >> 64;
+                sum64 = accumulate(sum64, tmp);
+#else
+                data = tmp;
+                sum64 = accumulate(sum64, tmp >> 64);
+#endif
+        }
+        if (len > 0) {
+                sum64 = accumulate(sum64, data);
+                data = READ_ONCE_NOCHECK(*ptr);
+                len -= 8;
+        }
+        /*
+         * Tail: zero any over-read bytes similarly to the head, again
+         * preserving odd/even alignment.
+         */
+        shift = len * -8;
+#ifdef __LITTLE_ENDIAN
+        data = (data << shift) >> shift;
+#else
+        data = (data >> shift) << shift;
+#endif
+        sum64 = accumulate(sum64, data);
+
+        /* Finally, folding */
+        sum64 += (sum64 >> 32) | (sum64 << 32);
+        sum = sum64 >> 32;
+        sum += (sum >> 16) | (sum << 16);
+        if (offset & 1)
+                return (u16)swab32(sum);
+
+        return sum >> 16;
+}
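
One more aside, again plain commentary rather than patch content: the
recurring rotate-and-add pattern above - the 128-bit "don't dump the carry
flag into a GPR" idiom and the final 64-to-32-bit fold - is the textbook
end-around-carry addition rewritten so that the carry lands back in the high
half of the value instead of in a separate register. A throwaway, purely
hypothetical user-space check makes the equivalence of the 64-to-32-bit fold
easy to convince yourself of:

  /* Hypothetical user-space check, not part of the patch. */
  #include <assert.h>
  #include <stdint.h>

  /* The patch's idiom: rotate the 64-bit sum by 32, add, keep the high half. */
  static uint32_t fold_rotate(uint64_t s)
  {
          s += (s >> 32) | (s << 32);
          return s >> 32;
  }

  /* Textbook end-around-carry fold of a 64-bit sum down to 32 bits. */
  static uint32_t fold_carry(uint64_t s)
  {
          uint64_t r = (s >> 32) + (uint32_t)s;

          return (uint32_t)r + (uint32_t)(r >> 32);
  }

  int main(void)
  {
          uint64_t samples[] = {
                  0x0ULL,
                  0x1ULL,
                  0xffffffff00000001ULL,  /* forces a carry out of the low half */
                  0xffffffffffffffffULL,
                  0x123456789abcdef0ULL,
          };
          unsigned int i;

          for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                  assert(fold_rotate(samples[i]) == fold_carry(samples[i]));

          return 0;
  }

The same reasoning applies one level down for the 16-bit fold, and at 128-bit
width for the tmpN accumulation in the main loop.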