/*
 * Copyright (c) 2015 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Backport functionality introduced in Linux 4.0.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/printk.h>
#include <linux/export.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
#include <asm/unaligned.h>

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long nr_pages,
                                                    int write, int force,
                                                    struct page **pages,
                                                    struct vm_area_struct **vmas,
                                                    int *locked, bool notify_drop,
                                                    unsigned int flags)
{
        long ret, pages_done;
        bool lock_dropped;

        if (locked) {
                /* if VM_FAULT_RETRY can be returned, vmas become invalid */
                BUG_ON(vmas);
                /* check caller initialized locked */
                BUG_ON(*locked != 1);
        }

        if (pages)
                flags |= FOLL_GET;
        if (write)
                flags |= FOLL_WRITE;
        if (force)
                flags |= FOLL_FORCE;

        pages_done = 0;
        lock_dropped = false;
        for (;;) {
                ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
                                       vmas, locked);
                if (!locked)
                        /* VM_FAULT_RETRY couldn't trigger, bypass */
                        return ret;

                /* VM_FAULT_RETRY cannot return errors */
                if (!*locked) {
                        BUG_ON(ret < 0);
                        BUG_ON(ret >= nr_pages);
                }

                if (!pages)
                        /* If it's a prefault don't insist harder */
                        return ret;

                if (ret > 0) {
                        nr_pages -= ret;
                        pages_done += ret;
                        if (!nr_pages)
                                break;
                }
                if (*locked) {
                        /* VM_FAULT_RETRY didn't trigger */
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }
                /* VM_FAULT_RETRY triggered, so seek to the faulting offset */
                pages += ret;
                start += ret << PAGE_SHIFT;

                /*
                 * Repeat on the address that fired VM_FAULT_RETRY
                 * without FAULT_FLAG_ALLOW_RETRY but with
                 * FAULT_FLAG_TRIED.
                 */
                *locked = 1;
                lock_dropped = true;
                down_read(&mm->mmap_sem);
                ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
                                       pages, NULL, NULL);
                if (ret != 1) {
                        BUG_ON(ret > 1);
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }
                nr_pages--;
                pages_done++;
                if (!nr_pages)
                        break;
                pages++;
                start += PAGE_SIZE;
        }
        if (notify_drop && lock_dropped && *locked) {
                /*
                 * We must let the caller know we temporarily dropped the lock
                 * and so the critical section protected by it was lost.
                 */
                up_read(&mm->mmap_sem);
                *locked = 0;
        }
        return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 * to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *              up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                           int write, int force, struct page **pages,
                           int *locked)
{
        return __get_user_pages_locked(current, current->mm, start, nr_pages,
                                       write, force, pages, NULL, locked, true,
                                       FOLL_TOUCH);
}
EXPORT_SYMBOL_GPL(get_user_pages_locked);
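
/*
 * Usage sketch for the backported signature above (illustrative only;
 * identifiers such as user_addr, nr and page_array are hypothetical).
 * The caller takes mmap_sem itself and lets the fault path drop and
 * retake it:
 *
 *      int locked = 1;
 *      long pinned;
 *
 *      down_read(&current->mm->mmap_sem);
 *      pinned = get_user_pages_locked(user_addr, nr, 1, 0,
 *                                     page_array, &locked);
 *      if (locked)
 *              up_read(&current->mm->mmap_sem);
 */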

/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows
 * additional gup_flags to be passed as the last parameter (like
 * FOLL_HWPOISON).
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET",
 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
 * according to the parameters "pages", "write", "force"
 * respectively.
 */
static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                                                      unsigned long start, unsigned long nr_pages,
                                                      int write, int force, struct page **pages,
                                                      unsigned int gup_flags)
{
        long ret;
        int locked = 1;

        down_read(&mm->mmap_sem);
        ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
                                      pages, NULL, &locked, false, gup_flags);
        if (locked)
                up_read(&mm->mmap_sem);
        return ret;
}
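
/*
 * Illustrative call of the helper above (identifiers are hypothetical):
 * a caller that needs extra gup_flags on top of the implicit ones, e.g.
 * FOLL_HWPOISON, could do
 *
 *      ret = __get_user_pages_unlocked(tsk, mm, addr, 1, 1, 0, &page,
 *                                      FOLL_TOUCH | FOLL_HWPOISON);
 *
 * whereas get_user_pages_unlocked() below always passes FOLL_TOUCH.
 */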

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead, if the two parameters
 * "tsk" and "mm" are respectively equal to current and current->mm,
 * or if "force" shall be set to 1 (get_user_pages_fast misses the
 * "force" parameter).
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                             int write, int force, struct page **pages)
{
        return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
                                         write, force, pages, FOLL_TOUCH);
}
EXPORT_SYMBOL_GPL(get_user_pages_unlocked);
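
/*
 * Usage sketch for the backported signature above (illustrative only;
 * user_addr is hypothetical): pin a single writable user page without
 * the caller managing mmap_sem at all:
 *
 *      struct page *page;
 *      long pinned;
 *
 *      pinned = get_user_pages_unlocked(user_addr & PAGE_MASK, 1, 1, 0,
 *                                       &page);
 *      if (pinned == 1) {
 *              ... access the page ...
 *              put_page(page);
 *      }
 */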


/**
 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @linebuf: where to put the converted data
 * @linebuflen: total size of @linebuf, including space for terminating NUL
 * @ascii: include ASCII after the hex output
 *
 * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 *
 * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
 * to a hex + ASCII dump at the supplied memory location.
 * The converted output is always NUL-terminated.
 *
 * E.g.:
 *   hex_dump_to_buffer(frame->data, frame->len, 16, 1,
 *                      linebuf, sizeof(linebuf), true);
 *
 * example output buffer:
 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 *
 * Return:
 * The number of bytes placed in the buffer without the terminating NUL. If
 * the output was truncated, then the return value is the number of bytes
 * (excluding the terminating NUL) which would have been written to the final
 * string if enough space had been available.
 */
int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
                       char *linebuf, size_t linebuflen, bool ascii)
{
        const u8 *ptr = buf;
        int ngroups;
        u8 ch;
        int j, lx = 0;
        int ascii_column;
        int ret;

        if (rowsize != 16 && rowsize != 32)
                rowsize = 16;

        if (len > rowsize)              /* limit to one line at a time */
                len = rowsize;
        if (!is_power_of_2(groupsize) || groupsize > 8)
                groupsize = 1;
        if ((len % groupsize) != 0)     /* no mixed size output */
                groupsize = 1;

        ngroups = len / groupsize;
        ascii_column = rowsize * 2 + rowsize / groupsize + 1;

        if (!linebuflen)
                goto overflow1;

        if (!len)
                goto nil;

        if (groupsize == 8) {
                const u64 *ptr8 = buf;

                for (j = 0; j < ngroups; j++) {
                        ret = snprintf(linebuf + lx, linebuflen - lx,
                                       "%s%16.16llx", j ? " " : "",
                                       get_unaligned(ptr8 + j));
                        if (ret >= linebuflen - lx)
                                goto overflow1;
                        lx += ret;
                }
        } else if (groupsize == 4) {
                const u32 *ptr4 = buf;

                for (j = 0; j < ngroups; j++) {
                        ret = snprintf(linebuf + lx, linebuflen - lx,
                                       "%s%8.8x", j ? " " : "",
                                       get_unaligned(ptr4 + j));
                        if (ret >= linebuflen - lx)
                                goto overflow1;
                        lx += ret;
                }
        } else if (groupsize == 2) {
                const u16 *ptr2 = buf;

                for (j = 0; j < ngroups; j++) {
                        ret = snprintf(linebuf + lx, linebuflen - lx,
                                       "%s%4.4x", j ? " " : "",
                                       get_unaligned(ptr2 + j));
                        if (ret >= linebuflen - lx)
                                goto overflow1;
                        lx += ret;
                }
        } else {
                for (j = 0; j < len; j++) {
                        if (linebuflen < lx + 3)
                                goto overflow2;
                        ch = ptr[j];
                        linebuf[lx++] = hex_asc_hi(ch);
                        linebuf[lx++] = hex_asc_lo(ch);
                        linebuf[lx++] = ' ';
                }
                if (j)
                        lx--;
        }
        if (!ascii)
                goto nil;

        while (lx < ascii_column) {
                if (linebuflen < lx + 2)
                        goto overflow2;
                linebuf[lx++] = ' ';
        }
        for (j = 0; j < len; j++) {
                if (linebuflen < lx + 2)
                        goto overflow2;
                ch = ptr[j];
                linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
        }
nil:
        linebuf[lx] = '\0';
        return lx;
overflow2:
        linebuf[lx++] = '\0';
overflow1:
        return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
}
EXPORT_SYMBOL_GPL(hex_dump_to_buffer);
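
/*
 * Usage sketch (illustrative only; data and data_len are hypothetical):
 * since hex_dump_to_buffer() formats at most one row per call, dumping a
 * longer buffer means calling it once per row, which mirrors what
 * print_hex_dump() does internally:
 *
 *      unsigned char linebuf[32 * 3 + 2 + 32 + 1];
 *      size_t i;
 *
 *      for (i = 0; i < data_len; i += 16) {
 *              hex_dump_to_buffer(data + i, data_len - i, 16, 1,
 *                                 linebuf, sizeof(linebuf), true);
 *              pr_debug("%.8zx: %s\n", i, linebuf);
 *      }
 */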
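/*
 * Print the @buf_len bytes at @buf as a brace-enclosed, comma-separated
 * list of hex values to the trace sequence @p, reading @el_size bytes
 * (1, 2, 4 or 8) per element; an unsupported @el_size is flagged and then
 * treated as 1.  Returns a pointer to the start of the formatted string
 * in the trace_seq buffer.
 */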
const char *
ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
                       size_t el_size)
{
        const char *ret = trace_seq_buffer_ptr(p);
        const char *prefix = "";
        void *ptr = (void *)buf;

        trace_seq_putc(p, '{');

        while (ptr < buf + buf_len) {
                switch (el_size) {
                case 1:
                        trace_seq_printf(p, "%s0x%x", prefix,
                                         *(u8 *)ptr);
                        break;
                case 2:
                        trace_seq_printf(p, "%s0x%x", prefix,
                                         *(u16 *)ptr);
                        break;
                case 4:
                        trace_seq_printf(p, "%s0x%x", prefix,
                                         *(u32 *)ptr);
                        break;
                case 8:
                        trace_seq_printf(p, "%s0x%llx", prefix,
                                         *(u64 *)ptr);
                        break;
                default:
                        trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
                                         *(u8 *)ptr);
                        el_size = 1;
                }
                prefix = ",";
                ptr += el_size;
        }

        trace_seq_putc(p, '}');
        trace_seq_putc(p, 0);

        return ret;
}
EXPORT_SYMBOL(ftrace_print_array_seq);