kernel: Fix arm64 include files
[openwrt/staging/dedeckeh.git] target/linux/generic/backport-4.19/210-arm64-sve-Disentangle-uapi-asm-ptrace.h-from-uapi-as.patch
From 9966a05c7b80f075f2bc7e48dbb108d3f2927234 Mon Sep 17 00:00:00 2001
From: Dave Martin <Dave.Martin@arm.com>
Date: Fri, 4 Jan 2019 13:09:51 +0000
Subject: [PATCH] arm64/sve: Disentangle <uapi/asm/ptrace.h> from
 <uapi/asm/sigcontext.h>

Currently, <uapi/asm/sigcontext.h> provides common definitions for
describing SVE context structures that are also used by the ptrace
definitions in <uapi/asm/ptrace.h>.

For this reason, a #include of <asm/sigcontext.h> was added in
ptrace.h, but it turns out that this can interact badly with
userspace code that tries to include ptrace.h on top of the libc
headers (which may provide their own shadow definitions for
sigcontext.h).

To make the headers easier for userspace to consume, this patch
bounces the common definitions into an __SVE_* namespace and moves
them to a backend header <uapi/asm/sve_context.h> that can be
included by the other headers as appropriate. This should allow
ptrace.h to be used alongside libc's sigcontext.h (if any) without
ill effects.
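
As a worked example of what those common definitions describe (not
part of the patch itself): with __SVE_VQ_BYTES = 16 and vq = 2, i.e.
a 256-bit vector length, __SVE_ZREG_SIZE(2) is 32 bytes and
__SVE_PREG_SIZE(2) = __SVE_FFR_SIZE(2) is 4 bytes, so the packed
block laid out by the __SVE_*_OFFSET macros has Z0..Z31 at offsets
0..992, P0..P15 starting at __SVE_PREGS_OFFSET(2) = 1024, and FFR at
__SVE_FFR_OFFSET(2) = 1088.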

This should make the situation unambiguous: <asm/sigcontext.h> is
the header to include for the sigframe-specific definitions, while
<asm/ptrace.h> is the header to include for ptrace-specific
definitions.
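
As a rough sketch of that split (hypothetical helpers, assuming arm64
UAPI headers that contain this change), sigframe parsing code reaches
for the SVE_SIG_* macros while a ptrace user reaches for SVE_PT_*:

    #include <stddef.h>
    #include <asm/sigcontext.h>  /* SVE_SIG_*: signal frame layout */
    #include <asm/ptrace.h>      /* SVE_PT_*: NT_ARM_SVE regset layout */

    /* Bytes taken by the sve_context record in a signal frame. */
    static size_t sig_sve_bytes(unsigned int vq)
    {
            return SVE_SIG_CONTEXT_SIZE(vq);
    }

    /* Bytes of regset payload when full SVE state is present. */
    static size_t pt_sve_bytes(unsigned int vq)
    {
            return SVE_PT_SIZE(vq, SVE_PT_REGS_SVE);
    }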

To avoid conflicting with existing usage, <asm/sigcontext.h>
remains the canonical way to get the common definitions for
SVE_VQ_MIN, sve_vq_from_vl() etc., both in userspace and in the
kernel: relying on these being defined as a side effect of
including just <asm/ptrace.h> was never intended to be safe.
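
For instance (hypothetical snippet, not part of this patch), code
that only needs the common definitions keeps including
<asm/sigcontext.h> exactly as before:

    #include <asm/sigcontext.h>

    /* Map a vector length in bytes to a quadword count, 0 if invalid. */
    static unsigned int vq_of(unsigned int vl)
    {
            return sve_vl_valid(vl) ? sve_vq_from_vl(vl) : 0;
    }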

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 arch/arm64/include/uapi/asm/ptrace.h      | 39 ++++++++--------
 arch/arm64/include/uapi/asm/sigcontext.h  | 56 +++++++++++------------
 arch/arm64/include/uapi/asm/sve_context.h | 53 +++++++++++++++++++++
 3 files changed, 99 insertions(+), 49 deletions(-)
 create mode 100644 arch/arm64/include/uapi/asm/sve_context.h

--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -23,7 +23,7 @@
 #include <linux/types.h>

 #include <asm/hwcap.h>
-#include <asm/sigcontext.h>
+#include <asm/sve_context.h>


 /*
@@ -128,9 +128,9 @@ struct user_sve_header {
 */

 /* Offset from the start of struct user_sve_header to the register data */
-#define SVE_PT_REGS_OFFSET \
- ((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1)) \
- / SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_REGS_OFFSET \
+ ((sizeof(struct user_sve_header) + (__SVE_VQ_BYTES - 1)) \
+ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)

 /*
 * The register data content and layout depends on the value of the
@@ -176,39 +176,36 @@ struct user_sve_header {
 * Additional data might be appended in the future.
 */

-#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq)
-#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
-#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq)
+#define SVE_PT_SVE_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq)
+#define SVE_PT_SVE_PREG_SIZE(vq) __SVE_PREG_SIZE(vq)
+#define SVE_PT_SVE_FFR_SIZE(vq) __SVE_FFR_SIZE(vq)
 #define SVE_PT_SVE_FPSR_SIZE sizeof(__u32)
 #define SVE_PT_SVE_FPCR_SIZE sizeof(__u32)

-#define __SVE_SIG_TO_PT(offset) \
- ((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
-
 #define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET

 #define SVE_PT_SVE_ZREGS_OFFSET \
- __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
+ (SVE_PT_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \
- __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
+ (SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
 #define SVE_PT_SVE_ZREGS_SIZE(vq) \
- (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
+ (SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)

 #define SVE_PT_SVE_PREGS_OFFSET(vq) \
- __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
+ (SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_PT_SVE_PREG_OFFSET(vq, n) \
- __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
+ (SVE_PT_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
 #define SVE_PT_SVE_PREGS_SIZE(vq) \
- (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \
+ (SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \
 SVE_PT_SVE_PREGS_OFFSET(vq))

 #define SVE_PT_SVE_FFR_OFFSET(vq) \
- __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
+ (SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq))

 #define SVE_PT_SVE_FPSR_OFFSET(vq) \
 ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \
- (SVE_VQ_BYTES - 1)) \
- / SVE_VQ_BYTES * SVE_VQ_BYTES)
+ (__SVE_VQ_BYTES - 1)) \
+ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 #define SVE_PT_SVE_FPCR_OFFSET(vq) \
 (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)

@@ -219,8 +216,8 @@ struct user_sve_header {

 #define SVE_PT_SVE_SIZE(vq, flags) \
 ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \
- - SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \
- / SVE_VQ_BYTES * SVE_VQ_BYTES)
+ - SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1)) \
+ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)

 #define SVE_PT_SIZE(vq, flags) \
 (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -130,6 +130,8 @@ struct sve_context {

 #endif /* !__ASSEMBLY__ */

+#include <asm/sve_context.h>
+
 /*
 * The SVE architecture leaves space for future expansion of the
 * vector length beyond its initial architectural limit of 2048 bits
@@ -138,21 +140,20 @@ struct sve_context {
 * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
 * terminology.
 */
-#define SVE_VQ_BYTES 16 /* number of bytes per quadword */
+#define SVE_VQ_BYTES __SVE_VQ_BYTES /* bytes per quadword */

-#define SVE_VQ_MIN 1
-#define SVE_VQ_MAX 512
+#define SVE_VQ_MIN __SVE_VQ_MIN
+#define SVE_VQ_MAX __SVE_VQ_MAX

-#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
-#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)
+#define SVE_VL_MIN __SVE_VL_MIN
+#define SVE_VL_MAX __SVE_VL_MAX

-#define SVE_NUM_ZREGS 32
-#define SVE_NUM_PREGS 16
+#define SVE_NUM_ZREGS __SVE_NUM_ZREGS
+#define SVE_NUM_PREGS __SVE_NUM_PREGS

-#define sve_vl_valid(vl) \
- ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
-#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
-#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES)
+#define sve_vl_valid(vl) __sve_vl_valid(vl)
+#define sve_vq_from_vl(vl) __sve_vq_from_vl(vl)
+#define sve_vl_from_vq(vq) __sve_vl_from_vq(vq)

 /*
 * If the SVE registers are currently live for the thread at signal delivery,
@@ -205,34 +206,33 @@ struct sve_context {
 * Additional data might be appended in the future.
 */

-#define SVE_SIG_ZREG_SIZE(vq) ((__u32)(vq) * SVE_VQ_BYTES)
-#define SVE_SIG_PREG_SIZE(vq) ((__u32)(vq) * (SVE_VQ_BYTES / 8))
-#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
+#define SVE_SIG_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq)
+#define SVE_SIG_PREG_SIZE(vq) __SVE_PREG_SIZE(vq)
+#define SVE_SIG_FFR_SIZE(vq) __SVE_FFR_SIZE(vq)

 #define SVE_SIG_REGS_OFFSET \
- ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
- / SVE_VQ_BYTES * SVE_VQ_BYTES)
+ ((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1)) \
+ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)

-#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREGS_OFFSET \
+ (SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_SIG_ZREG_OFFSET(vq, n) \
- (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
-#define SVE_SIG_ZREGS_SIZE(vq) \
- (SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+ (SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
+#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq)

 #define SVE_SIG_PREGS_OFFSET(vq) \
- (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+ (SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_SIG_PREG_OFFSET(vq, n) \
- (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
-#define SVE_SIG_PREGS_SIZE(vq) \
- (SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+ (SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
+#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq)

 #define SVE_SIG_FFR_OFFSET(vq) \
- (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+ (SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq))

 #define SVE_SIG_REGS_SIZE(vq) \
- (SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
-
-#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
+ (__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq))

+#define SVE_SIG_CONTEXT_SIZE(vq) \
+ (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))

 #endif /* _UAPI__ASM_SIGCONTEXT_H */
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/sve_context.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017-2018 ARM Limited */
+
+/*
+ * For use by other UAPI headers only.
+ * Do not make direct use of header or its definitions.
+ */
+
+#ifndef _UAPI__ASM_SVE_CONTEXT_H
+#define _UAPI__ASM_SVE_CONTEXT_H
+
+#include <linux/types.h>
+
+#define __SVE_VQ_BYTES 16 /* number of bytes per quadword */
+
+#define __SVE_VQ_MIN 1
+#define __SVE_VQ_MAX 512
+
+#define __SVE_VL_MIN (__SVE_VQ_MIN * __SVE_VQ_BYTES)
+#define __SVE_VL_MAX (__SVE_VQ_MAX * __SVE_VQ_BYTES)
+
+#define __SVE_NUM_ZREGS 32
+#define __SVE_NUM_PREGS 16
+
+#define __sve_vl_valid(vl) \
+ ((vl) % __SVE_VQ_BYTES == 0 && \
+ (vl) >= __SVE_VL_MIN && \
+ (vl) <= __SVE_VL_MAX)
+
+#define __sve_vq_from_vl(vl) ((vl) / __SVE_VQ_BYTES)
+#define __sve_vl_from_vq(vq) ((vq) * __SVE_VQ_BYTES)
+
+#define __SVE_ZREG_SIZE(vq) ((__u32)(vq) * __SVE_VQ_BYTES)
+#define __SVE_PREG_SIZE(vq) ((__u32)(vq) * (__SVE_VQ_BYTES / 8))
+#define __SVE_FFR_SIZE(vq) __SVE_PREG_SIZE(vq)
+
+#define __SVE_ZREGS_OFFSET 0
+#define __SVE_ZREG_OFFSET(vq, n) \
+ (__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
+#define __SVE_ZREGS_SIZE(vq) \
+ (__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)
+
+#define __SVE_PREGS_OFFSET(vq) \
+ (__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
+#define __SVE_PREG_OFFSET(vq, n) \
+ (__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n))
+#define __SVE_PREGS_SIZE(vq) \
+ (__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq))
+
+#define __SVE_FFR_OFFSET(vq) \
+ (__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq))
+
+#endif /* ! _UAPI__ASM_SVE_CONTEXT_H */