/*
 * Copyright (c) 2015 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Backport functionality introduced in Linux 4.0.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/printk.h>
#include <linux/export.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
#include <asm/unaligned.h>

static __always_inline
long __get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages,
			     struct vm_area_struct **vmas, int *locked,
			     bool notify_drop, unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *	down_read(&mm->mmap_sem);
 *	do_something()
 *	get_user_pages(tsk, mm, ..., pages, NULL);
 *	up_read(&mm->mmap_sem);
 *
 * to:
 *
 *	int locked = 1;
 *	down_read(&mm->mmap_sem);
 *	do_something()
 *	get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *	if (locked)
 *		up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       write, force, pages, NULL, locked, true,
				       FOLL_TOUCH);
}
EXPORT_SYMBOL_GPL(get_user_pages_locked);

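/*
 * Minimal usage sketch, assuming a hypothetical caller (the helper name
 * demo_pin_pages and its parameters are made up for illustration):
 *
 *	static long demo_pin_pages(unsigned long uaddr,
 *				   unsigned long nr_pages,
 *				   struct page **pages)
 *	{
 *		int locked = 1;
 *		long pinned;
 *
 *		down_read(&current->mm->mmap_sem);
 *		pinned = get_user_pages_locked(uaddr, nr_pages, 1, 0,
 *					       pages, &locked);
 *		if (locked)
 *			up_read(&current->mm->mmap_sem);
 *		return pinned;
 *	}
 *
 * If the fault handler had to drop mmap_sem, *locked is cleared on return
 * and the semaphore must not be released a second time.
 */
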
/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows the
 * caller to pass additional gup_flags as the last parameter (like
 * FOLL_HWPOISON).
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET",
 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
 * according to the parameters "pages", "write" and "force",
 * respectively.
 */
static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
			struct mm_struct *mm,
			unsigned long start, unsigned long nr_pages,
			int write, int force, struct page **pages,
			unsigned int gup_flags)
{
	long ret;
	int locked = 1;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				      pages, NULL, &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}

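/*
 * Sketch of how the gup_flags argument is meant to be used (illustrative
 * values only): a caller that also wants hwpoisoned pages reported as
 * errors would pass
 *
 *	ret = __get_user_pages_unlocked(current, current->mm, start,
 *					nr_pages, write, force, pages,
 *					FOLL_TOUCH | FOLL_HWPOISON);
 *
 * while the plain get_user_pages_unlocked() below passes FOLL_TOUCH only.
 */
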
/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *	down_read(&mm->mmap_sem);
 *	get_user_pages(tsk, mm, ..., pages, NULL);
 *	up_read(&mm->mmap_sem);
 *
 * with:
 *
 *	get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast(), so
 * get_user_pages_fast() should be used instead if the two parameters
 * "tsk" and "mm" are respectively equal to current and current->mm,
 * or if "force" shall be set to 1 (get_user_pages_fast() lacks the
 * "force" parameter).
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 write, force, pages, FOLL_TOUCH);
}
EXPORT_SYMBOL_GPL(get_user_pages_unlocked);

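/*
 * Minimal sketch (hypothetical caller): pin a single read-only page at
 * uaddr without managing mmap_sem explicitly, then drop the reference
 * once done with it:
 *
 *	struct page *page;
 *	long n;
 *
 *	n = get_user_pages_unlocked(uaddr, 1, 0, 0, &page);
 *	if (n == 1)
 *		put_page(page);
 */
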
/**
 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
 * @buf: data blob to dump
 * @len: number of bytes in @buf
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @linebuf: where to put the converted data
 * @linebuflen: total size of @linebuf, including space for terminating NUL
 * @ascii: include ASCII after the hex output
 *
 * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 *
 * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
 * to a hex + ASCII dump at the supplied memory location.
 * The converted output is always NUL-terminated.
 *
 * E.g.:
 *	hex_dump_to_buffer(frame->data, frame->len, 16, 1,
 *			   linebuf, sizeof(linebuf), true);
 *
 * example output buffer:
 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 *
 * Return:
 * The number of bytes placed in the buffer without the terminating NUL. If
 * the output was truncated, then the return value is the number of bytes
 * (excluding the terminating NUL) which would have been written to the
 * final string if enough space had been available.
 */
int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
		       char *linebuf, size_t linebuflen, bool ascii)
{
	const u8 *ptr = buf;
	int ngroups;
	u8 ch;
	int j, lx = 0;
	int ascii_column;
	int ret;

	if (rowsize != 16 && rowsize != 32)
		rowsize = 16;

	if (len > rowsize)		/* limit to one line at a time */
		len = rowsize;
	if (!is_power_of_2(groupsize) || groupsize > 8)
		groupsize = 1;
	if ((len % groupsize) != 0)	/* no mixed size output */
		groupsize = 1;

	ngroups = len / groupsize;
	ascii_column = rowsize * 2 + rowsize / groupsize + 1;

	if (!linebuflen)
		goto overflow1;

	if (!len)
		goto nil;

	if (groupsize == 8) {
		const u64 *ptr8 = buf;

		for (j = 0; j < ngroups; j++) {
			ret = snprintf(linebuf + lx, linebuflen - lx,
				       "%s%16.16llx", j ? " " : "",
				       get_unaligned(ptr8 + j));
			if (ret >= linebuflen - lx)
				goto overflow1;
			lx += ret;
		}
	} else if (groupsize == 4) {
		const u32 *ptr4 = buf;

		for (j = 0; j < ngroups; j++) {
			ret = snprintf(linebuf + lx, linebuflen - lx,
				       "%s%8.8x", j ? " " : "",
				       get_unaligned(ptr4 + j));
			if (ret >= linebuflen - lx)
				goto overflow1;
			lx += ret;
		}
	} else if (groupsize == 2) {
		const u16 *ptr2 = buf;

		for (j = 0; j < ngroups; j++) {
			ret = snprintf(linebuf + lx, linebuflen - lx,
				       "%s%4.4x", j ? " " : "",
				       get_unaligned(ptr2 + j));
			if (ret >= linebuflen - lx)
				goto overflow1;
			lx += ret;
		}
	} else {
		for (j = 0; j < len; j++) {
			if (linebuflen < lx + 3)
				goto overflow2;
			ch = ptr[j];
			linebuf[lx++] = hex_asc_hi(ch);
			linebuf[lx++] = hex_asc_lo(ch);
			linebuf[lx++] = ' ';
		}
		if (j)
			lx--;
	}
	if (!ascii)
		goto nil;

	while (lx < ascii_column) {
		if (linebuflen < lx + 2)
			goto overflow2;
		linebuf[lx++] = ' ';
	}
	for (j = 0; j < len; j++) {
		if (linebuflen < lx + 2)
			goto overflow2;
		ch = ptr[j];
		linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
	}
nil:
	linebuf[lx] = '\0';
	return lx;
overflow2:
	linebuf[lx++] = '\0';
overflow1:
	return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
}
EXPORT_SYMBOL_GPL(hex_dump_to_buffer);

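/*
 * Sketch of dumping a whole buffer line by line, assuming a hypothetical
 * helper (print_hex_dump() in lib/hexdump.c follows the same pattern and
 * uses the same linebuf sizing):
 *
 *	static void demo_hex_dump(const void *buf, size_t len)
 *	{
 *		char linebuf[32 * 3 + 2 + 32 + 1];
 *		size_t i;
 *
 *		for (i = 0; i < len; i += 16) {
 *			hex_dump_to_buffer(buf + i, min(len - i, (size_t)16),
 *					   16, 1, linebuf, sizeof(linebuf),
 *					   true);
 *			printk(KERN_DEBUG "%s\n", linebuf);
 *		}
 *	}
 */
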
/*
 * Print the elements of @buf as a "{0x..,0x..}" list into the trace
 * sequence @p. @buf_len is the total size of @buf in bytes and @el_size
 * the size of one element (1, 2, 4 or 8 bytes).
 */
const char *
ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
		       size_t el_size)
{
	const char *ret = trace_seq_buffer_ptr(p);
	const char *prefix = "";
	void *ptr = (void *)buf;

	trace_seq_putc(p, '{');

	while (ptr < buf + buf_len) {
		switch (el_size) {
		case 1:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u8 *)ptr);
			break;
		case 2:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u16 *)ptr);
			break;
		case 4:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u32 *)ptr);
			break;
		case 8:
			trace_seq_printf(p, "%s0x%llx", prefix,
					 *(u64 *)ptr);
			break;
		default:
			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
					 *(u8 *)ptr);
			/* fall back to a byte-wise dump on unknown sizes */
			el_size = 1;
			break;
		}
		prefix = ",";
		ptr += el_size;
	}

	trace_seq_putc(p, '}');
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_array_seq);

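/*
 * Illustrative call (hypothetical data): emit four u32 elements as
 * "{0x1,0x2,0x3,0x4}" into the trace sequence. Note that buf_len is a
 * byte count here, not an element count:
 *
 *	u32 data[4] = { 1, 2, 3, 4 };
 *
 *	ftrace_print_array_seq(p, data, sizeof(data), sizeof(u32));
 */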