/*
 * udebug - debug ring buffer library
 *
 * Copyright (C) 2023 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "udebug-priv.h"
20 static struct udebug_client_msg
*
21 send_and_wait(struct udebug
*ctx
, struct udebug_client_msg
*msg
, int *rfd
)
26 udebug_send_msg(ctx
, msg
, NULL
, -1);
32 msg
= __udebug_poll(ctx
, &fd
, true);
33 } while (msg
&& msg
->type
!= type
);
46 udebug_remote_get_handle(struct udebug
*ctx
)
48 struct udebug_client_msg
*msg
;
49 struct udebug_client_msg send_msg
= {
50 .type
= CL_MSG_GET_HANDLE
,
53 if (ctx
->poll_handle
>= 0 || !udebug_is_connected(ctx
))
56 msg
= send_and_wait(ctx
, &send_msg
, NULL
);
60 ctx
->poll_handle
= msg
->id
;
64 struct udebug_remote_buf
*udebug_remote_buf_get(struct udebug
*ctx
, uint32_t id
)
66 struct udebug_remote_buf
*rb
;
67 void *key
= (void *)(uintptr_t)id
;
69 return avl_find_element(&ctx
->remote_rings
, key
, rb
, node
);
72 int udebug_remote_buf_map(struct udebug
*ctx
, struct udebug_remote_buf
*rb
, uint32_t id
)
74 void *key
= (void *)(uintptr_t)id
;
75 struct udebug_client_msg
*msg
;
76 struct udebug_client_msg send_msg
= {
77 .type
= CL_MSG_RING_GET
,
82 if (rb
->buf
.data
|| !udebug_is_connected(ctx
))
85 msg
= send_and_wait(ctx
, &send_msg
, &fd
);
89 if (udebug_buf_open(&rb
->buf
, fd
, msg
->ring_size
, msg
->data_size
)) {
90 fprintf(stderr
, "failed to open fd %d, ring_size=%d, data_size=%d\n", fd
, msg
->ring_size
, msg
->data_size
);
97 avl_insert(&ctx
->remote_rings
, &rb
->node
);
102 void udebug_remote_buf_unmap(struct udebug
*ctx
, struct udebug_remote_buf
*rb
)
107 avl_delete(&ctx
->remote_rings
, &rb
->node
);
108 udebug_buf_free(&rb
->buf
);
114 int udebug_remote_buf_set_poll(struct udebug
*ctx
, struct udebug_remote_buf
*rb
, bool val
)
128 handle
= udebug_remote_get_handle(ctx
);
132 __atomic_fetch_or(&rb
->buf
.hdr
->notify
, 1UL << handle
, __ATOMIC_RELAXED
);
137 rbuf_advance_read_head(struct udebug_remote_buf
*rb
, uint32_t head
,
138 uint32_t *data_start
)
140 struct udebug_hdr
*hdr
= rb
->buf
.hdr
;
141 uint32_t min_head
= head
+ 1 - rb
->buf
.ring_size
;
142 uint32_t min_data
= u32_get(&hdr
->data_used
) - rb
->buf
.data_size
;
143 struct udebug_ptr
*last_ptr
= udebug_ring_ptr(hdr
, head
- 1);
145 if (!u32_get(&hdr
->head_hi
) && u32_sub(0, min_head
) > 0)
148 /* advance head to skip over any entries that are guaranteed
149 * to be overwritten now. final check will be performed after
152 if (u32_sub(rb
->head
, min_head
) < 0)
155 for (size_t i
= 0; i
< rb
->buf
.ring_size
; i
++) {
156 struct udebug_ptr
*ptr
= udebug_ring_ptr(hdr
, rb
->head
);
159 *data_start
= u32_get(&ptr
->start
);
160 __sync_synchronize();
163 if (ptr
->timestamp
> last_ptr
->timestamp
)
166 if (u32_sub(ptr
->start
, min_data
) > 0)
173 void udebug_remote_buf_set_start_time(struct udebug_remote_buf
*rb
, uint64_t ts
)
175 struct udebug_hdr
*hdr
= rb
->buf
.hdr
;
176 uint32_t head
= u32_get(&hdr
->head
);
177 uint32_t start
= rb
->head
, end
= head
;
183 rbuf_advance_read_head(rb
, head
, NULL
);
184 while ((diff
= u32_sub(end
, start
)) > 0) {
185 uint32_t cur
= start
+ diff
/ 2;
186 struct udebug_ptr
*ptr
;
188 ptr
= udebug_ring_ptr(hdr
, cur
);
189 if (ptr
->timestamp
> ts
)
198 void udebug_remote_buf_set_start_offset(struct udebug_remote_buf
*rb
, uint32_t idx
)
203 rb
->head
= rb
->buf
.hdr
->head
- idx
;
206 void udebug_remote_buf_set_flags(struct udebug_remote_buf
*rb
, uint64_t mask
, uint64_t set
)
208 struct udebug_hdr
*hdr
= rb
->buf
.hdr
;
214 __atomic_and_fetch(&hdr
->flags
[0], (uintptr_t)~mask
, __ATOMIC_RELAXED
);
216 __atomic_or_fetch(&hdr
->flags
[0], (uintptr_t)set
, __ATOMIC_RELAXED
);
218 if (sizeof(mask
) == sizeof(unsigned long))
223 __atomic_and_fetch(&hdr
->flags
[1], (uintptr_t)~mask
, __ATOMIC_RELAXED
);
225 __atomic_or_fetch(&hdr
->flags
[1], (uintptr_t)set
, __ATOMIC_RELAXED
);
228 struct udebug_snapshot
*
229 udebug_remote_buf_snapshot(struct udebug_remote_buf
*rb
)
231 struct udebug_hdr
*hdr
= rb
->buf
.hdr
;
232 struct udebug_ptr
*last_ptr
;
233 uint32_t data_start
, data_end
, data_used
;
234 struct udebug_snapshot
*s
= NULL
;
235 struct udebug_ptr
*ptr_buf
, *first_ptr
;
236 uint32_t data_size
, ptr_size
;
237 uint32_t head
, first_idx
;
238 uint32_t prev_read_head
= rb
->head
;
244 head
= u32_get(&hdr
->head
);
245 rbuf_advance_read_head(rb
, head
, &data_start
);
246 if (rb
->head
== head
)
249 first_idx
= rb
->head
;
250 first_ptr
= udebug_ring_ptr(hdr
, first_idx
);
251 last_ptr
= udebug_ring_ptr(hdr
, head
- 1);
252 data_end
= last_ptr
->start
+ last_ptr
->len
;
254 data_size
= data_end
- data_start
;
255 ptr_size
= head
- rb
->head
;
256 if (data_size
> rb
->buf
.data_size
|| ptr_size
> rb
->buf
.ring_size
) {
257 fprintf(stderr
, "Invalid data size: %x > %x, %x > %x\n", data_size
, (int)rb
->buf
.data_size
, ptr_size
, (int)rb
->buf
.ring_size
);
261 s
= calloc_a(sizeof(*s
),
262 &ptr_buf
, ptr_size
* sizeof(*ptr_buf
),
263 &data_buf
, data_size
);
265 s
->data
= memcpy(data_buf
, udebug_buf_ptr(&rb
->buf
, data_start
), data_size
);
266 s
->data_size
= data_size
;
267 s
->entries
= ptr_buf
;
268 s
->dropped
= rb
->head
- prev_read_head
;
270 if (first_ptr
> last_ptr
) {
271 struct udebug_ptr
*start_ptr
= udebug_ring_ptr(hdr
, 0);
272 struct udebug_ptr
*end_ptr
= udebug_ring_ptr(hdr
, rb
->buf
.ring_size
- 1) + 1;
273 uint32_t size
= end_ptr
- first_ptr
;
274 memcpy(s
->entries
, first_ptr
, size
* sizeof(*s
->entries
));
275 memcpy(s
->entries
+ size
, start_ptr
, (last_ptr
+ 1 - start_ptr
) * sizeof(*s
->entries
));
277 memcpy(s
->entries
, first_ptr
, (last_ptr
+ 1 - first_ptr
) * sizeof(*s
->entries
));
280 /* get a snapshot of the counter that indicates how much data has been
281 * clobbered by newly added entries */
282 __sync_synchronize();
283 data_used
= u32_get(&hdr
->data_used
) - rb
->buf
.data_size
;
285 s
->n_entries
= head
- first_idx
;
287 rbuf_advance_read_head(rb
, head
, NULL
);
288 if (s
->n_entries
< rb
->head
- first_idx
) {
294 s
->entries
+= rb
->head
- first_idx
;
295 s
->n_entries
-= rb
->head
- first_idx
;
296 while (s
->n_entries
> 0 &&
297 u32_sub(s
->entries
[0].start
, data_used
) < 0) {
303 for (size_t i
= 0; i
< s
->n_entries
; i
++)
304 s
->entries
[i
].start
-= data_start
;
306 s
->format
= hdr
->format
;
307 s
->sub_format
= hdr
->sub_format
;
308 s
->rbuf_idx
= (uint32_t)(uintptr_t)rb
->node
.key
;
315 bool udebug_snapshot_get_entry(struct udebug_snapshot
*s
, struct udebug_iter
*it
, unsigned int entry
)
317 struct udebug_ptr
*ptr
;
320 if (entry
>= s
->n_entries
)
323 ptr
= &s
->entries
[entry
];
324 if (ptr
->start
> s
->data_size
|| ptr
->len
> s
->data_size
||
325 ptr
->start
+ ptr
->len
> s
->data_size
)
329 it
->data
= s
->data
+ ptr
->start
;
331 it
->timestamp
= ptr
->timestamp
;
339 void udebug_iter_start(struct udebug_iter
*it
, struct udebug_snapshot
**s
, size_t n
)
341 memset(it
, 0, sizeof(*it
));
346 for (size_t i
= 0; i
< it
->n
; i
++)
347 it
->list
[i
]->iter_idx
= 0;
/*
 * NOTE(review): this function is truncated in the damaged source — its
 * tail (including the declarations of `cur`/`cur_ts` and the final
 * return) lies past the visible lines, and the statements below are
 * wrapped mid-token by the extraction.  Code left byte-identical;
 * comments only.  Presumably merges entries from all snapshots in
 * timestamp order — TODO confirm against upstream.
 */
350 bool udebug_iter_next(struct udebug_iter
*it
)
/* current snapshot under consideration */
353 struct udebug_snapshot
*s
;
/* scan every snapshot in the list for the next candidate entry */
357 for (size_t i
= 0; i
< it
->n
; i
++) {
358 struct udebug_ptr
*ptr
;
/* skip snapshots whose entries are fully consumed */
361 if (s
->iter_idx
>= s
->n_entries
)
364 ptr
= &s
->entries
[s
->iter_idx
];
/* keep the earliest-timestamp candidate (cur/cur_ts are declared in
 * the missing portion of this function) */
365 if (cur
>= 0 && ptr
->timestamp
> cur_ts
)
369 cur_ts
= ptr
->timestamp
;
/* load the chosen entry into the iterator, advancing that snapshot's
 * cursor */
377 if (!udebug_snapshot_get_entry(s
, it
, s
->iter_idx
++))