/*
 * udebug - debug ring buffer library
 *
 * Copyright (C) 2023 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/socket.h>

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <libubox/usock.h>

#include "udebug-priv.h"
/* Round i up to the next multiple of sz (sz must be a power of two). */
#define ALIGN(i, sz) (((i) + (sz) - 1) & ~((sz) - 1))

/* BSD/macOS headers only provide MAP_ANON; must not redefine on Linux */
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

/* initial data allocation for printf-style entries; grown on demand */
#define UDEBUG_MIN_ALLOC_LEN 128
/* scratch blob buffer used to compose blobmsg metadata sent to udebugd */
static struct blob_buf b;
41 static void __randname(char *template)
47 clock_gettime(CLOCK_REALTIME
, &ts
);
48 r
= ts
.tv_sec
+ ts
.tv_nsec
;
49 for (i
=0; i
<6; i
++, r
>>=5)
50 template[i
] = 'A'+(r
&15)+(r
&16)*2;
/*
 * avl_tree comparator for ring buffer ids.
 * Keys are uint32_t ids stored directly in the (void *) key slots.
 * Returns <0, 0, >0 in strcmp style; @ptr (avl user data) is unused.
 */
int udebug_id_cmp(const void *k1, const void *k2, void *ptr)
{
	uint32_t id1 = (uint32_t)(uintptr_t)k1, id2 = (uint32_t)(uintptr_t)k2;

	if (id1 < id2)
		return -1;
	if (id1 > id2)
		return 1;

	return 0;
}
/*
 * Create an anonymous shared-memory fd from a mkstemp-style template.
 * @name must end in "XXXXXX"; it is overwritten with random characters.
 * The object is shm_unlink()ed immediately so only the fd keeps it alive.
 * Returns the fd on success, -1 on failure.
 */
static int
shm_open_anon(char *name)
{
	char *template = name + strlen(name) - 6;
	int fd;

	if (template < name || memcmp(template, "XXXXXX", 6) != 0)
		return -1;

	/* retry on name collisions (EEXIST) with a fresh random suffix */
	for (int i = 0; i < 100; i++) {
		__randname(template);
		fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
		if (fd >= 0) {
			if (shm_unlink(name) < 0) {
				close(fd);
				continue;
			}
			return fd;
		}

		if (fd < 0 && errno != EEXIST)
			return -1;
	}

	return -1;
}
90 static void __udebug_disconnect(struct udebug
*ctx
, bool reconnect
)
92 uloop_fd_delete(&ctx
->fd
);
95 ctx
->poll_handle
= -1;
96 if (ctx
->reconnect
.cb
&& reconnect
)
97 uloop_timeout_set(&ctx
->reconnect
, 1);
100 uint64_t udebug_timestamp(void)
105 clock_gettime(CLOCK_REALTIME
, &ts
);
108 val
*= UDEBUG_TS_SEC
;
109 val
+= ts
.tv_nsec
/ 1000;
115 __udebug_buf_map(struct udebug_buf
*buf
)
119 ptr
= mmap(NULL
, buf
->head_size
+ 2 * buf
->data_size
, PROT_NONE
,
120 MAP_ANONYMOUS
| MAP_PRIVATE
, -1, 0);
121 if (ptr
== MAP_FAILED
)
124 ptr2
= mmap(ptr
, buf
->head_size
+ buf
->data_size
,
125 PROT_READ
| PROT_WRITE
, MAP_FIXED
| MAP_SHARED
, buf
->fd
, 0);
129 ptr2
= mmap(ptr
+ buf
->head_size
+ buf
->data_size
, buf
->data_size
,
130 PROT_READ
| PROT_WRITE
, MAP_FIXED
| MAP_SHARED
, buf
->fd
,
132 if (ptr2
!= ptr
+ buf
->head_size
+ buf
->data_size
)
136 buf
->data
= ptr
+ buf
->head_size
;
140 munmap(ptr
, buf
->head_size
+ 2 * buf
->data_size
);
/*
 * Write all iovec data to @fd, retrying on partial writes and EAGAIN/EINTR.
 * When @sock_fd >= 0, it is passed along with the first successful chunk
 * as an SCM_RIGHTS ancillary message.
 * Returns the total number of bytes written, or -1 on error.
 */
static int
writev_retry(int fd, struct iovec *iov, int iov_len, int sock_fd)
{
	uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msghdr = { 0 };
	struct cmsghdr *cmsg;
	int len = 0;
	int *pfd;

	msghdr.msg_iov = iov,
	msghdr.msg_iovlen = iov_len,
	msghdr.msg_control = fd_buf;
	msghdr.msg_controllen = sizeof(fd_buf);

	cmsg = CMSG_FIRSTHDR(&msghdr);
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	pfd = (int *) CMSG_DATA(cmsg);
	msghdr.msg_controllen = cmsg->cmsg_len;

	do {
		ssize_t cur_len;

		if (sock_fd < 0) {
			/* no fd (left) to pass: strip the control message */
			msghdr.msg_control = NULL;
			msghdr.msg_controllen = 0;
		} else {
			*pfd = sock_fd;
		}

		cur_len = sendmsg(fd, &msghdr, 0);
		if (cur_len < 0) {
			struct pollfd pfd = {
				.fd = fd,
				.events = POLLOUT
			};

			switch (errno) {
			case EAGAIN:
				poll(&pfd, 1, -1);
				/* fallthrough */
			case EINTR:
				continue;
			default:
				return -1;
			}
		}

		/* the fd is only sent once, with the first chunk */
		if (len > 0)
			sock_fd = -1;

		len += cur_len;
		while (cur_len >= (ssize_t) iov->iov_len) {
			cur_len -= iov->iov_len;
			iov_len--;
			iov++;
			if (!iov_len)
				return len;
		}

		/* advance into the partially written iovec */
		iov->iov_base += cur_len;
		iov->iov_len -= cur_len;
		msghdr.msg_iov = iov;
		msghdr.msg_iovlen = iov_len;
	} while (1);

	/* Should never reach here */
	return -1;
}
217 recv_retry(int fd
, struct iovec
*iov
, bool wait
, int *recv_fd
)
219 uint8_t fd_buf
[CMSG_SPACE(sizeof(int))] = { 0 };
220 struct msghdr msghdr
= { 0 };
221 struct cmsghdr
*cmsg
;
226 msghdr
.msg_iov
= iov
,
227 msghdr
.msg_iovlen
= 1,
228 msghdr
.msg_control
= fd_buf
;
229 msghdr
.msg_controllen
= sizeof(fd_buf
);
231 cmsg
= CMSG_FIRSTHDR(&msghdr
);
232 cmsg
->cmsg_type
= SCM_RIGHTS
;
233 cmsg
->cmsg_level
= SOL_SOCKET
;
234 cmsg
->cmsg_len
= CMSG_LEN(sizeof(int));
236 pfd
= (int *) CMSG_DATA(cmsg
);
238 while (iov
->iov_len
> 0) {
240 msghdr
.msg_control
= fd_buf
;
241 msghdr
.msg_controllen
= cmsg
->cmsg_len
;
243 msghdr
.msg_control
= NULL
;
244 msghdr
.msg_controllen
= 0;
248 bytes
= recvmsg(fd
, &msghdr
, 0);
271 iov
->iov_len
-= bytes
;
272 iov
->iov_base
+= bytes
;
275 if (iov
->iov_len
> 0) {
276 struct pollfd pfd
= {
282 ret
= poll(&pfd
, 1, UDEBUG_TIMEOUT
);
283 } while (ret
< 0 && errno
== EINTR
);
285 if (!(pfd
.revents
& POLLIN
))
293 void udebug_send_msg(struct udebug
*ctx
, struct udebug_client_msg
*msg
,
294 struct blob_attr
*meta
, int fd
)
296 struct iovec iov
[2] = {
297 { .iov_base
= msg
, .iov_len
= sizeof(*msg
) },
302 blob_buf_init(&b
, 0);
306 iov
[1].iov_base
= meta
;
307 iov
[1].iov_len
= blob_pad_len(meta
);
308 writev_retry(ctx
->fd
.fd
, iov
, ARRAY_SIZE(iov
), fd
);
312 udebug_buf_msg(struct udebug_buf
*buf
, enum udebug_client_msg_type type
)
314 struct udebug_client_msg msg
= {
319 udebug_send_msg(buf
->ctx
, &msg
, NULL
, -1);
322 static size_t __udebug_headsize(unsigned int ring_size
, unsigned int page_size
)
324 ring_size
*= sizeof(struct udebug_ptr
);
325 return ALIGN(sizeof(struct udebug_hdr
) + ring_size
, page_size
);
328 int udebug_buf_open(struct udebug_buf
*buf
, int fd
, uint32_t ring_size
, uint32_t data_size
)
330 INIT_LIST_HEAD(&buf
->list
);
332 buf
->ring_size
= ring_size
;
333 buf
->head_size
= __udebug_headsize(ring_size
, sysconf(_SC_PAGESIZE
));
334 buf
->data_size
= data_size
;
336 if (buf
->ring_size
> (1U << 24) || buf
->data_size
> (1U << 29))
339 if (__udebug_buf_map(buf
))
342 if (buf
->ring_size
!= buf
->hdr
->ring_size
||
343 buf
->data_size
!= buf
->hdr
->data_size
) {
344 munmap(buf
->hdr
, buf
->head_size
+ 2 * buf
->data_size
);
352 int udebug_buf_init(struct udebug_buf
*buf
, size_t entries
, size_t size
)
354 uint32_t pagesz
= sysconf(_SC_PAGESIZE
);
355 char filename
[] = "/udebug.XXXXXX";
356 unsigned int order
= 12;
357 uint8_t ring_order
= 5;
361 INIT_LIST_HEAD(&buf
->list
);
364 while(size
> 1U << order
)
367 while (entries
> 1U << ring_order
)
369 entries
= 1 << ring_order
;
371 if (size
> (1U << 29) || entries
> (1U << 24))
374 head_size
= __udebug_headsize(entries
, pagesz
);
375 while (ALIGN(sizeof(*buf
->hdr
) + (entries
* 2) * sizeof(struct udebug_ptr
), pagesz
) == head_size
)
378 fd
= shm_open_anon(filename
);
382 if (ftruncate(fd
, head_size
+ size
) < 0)
385 buf
->head_size
= head_size
;
386 buf
->data_size
= size
;
387 buf
->ring_size
= entries
;
390 if (__udebug_buf_map(buf
))
393 buf
->hdr
->ring_size
= entries
;
394 buf
->hdr
->data_size
= size
;
396 /* ensure hdr changes are visible */
397 __sync_synchronize();
406 static void *udebug_buf_alloc(struct udebug_buf
*buf
, uint32_t ofs
, uint32_t len
)
408 struct udebug_hdr
*hdr
= buf
->hdr
;
410 hdr
->data_used
= u32_max(hdr
->data_used
, ofs
+ len
+ 1);
412 /* ensure that data_used update is visible before clobbering data */
413 __sync_synchronize();
415 return udebug_buf_ptr(buf
, ofs
);
418 uint64_t udebug_buf_flags(struct udebug_buf
*buf
)
420 struct udebug_hdr
*hdr
= buf
->hdr
;
426 flags
= hdr
->flags
[0];
427 if (sizeof(flags
) != sizeof(uintptr_t))
428 flags
|= ((uint64_t)hdr
->flags
[1]) << 32;
433 void udebug_entry_init_ts(struct udebug_buf
*buf
, uint64_t timestamp
)
435 struct udebug_hdr
*hdr
= buf
->hdr
;
436 struct udebug_ptr
*ptr
;
441 ptr
= udebug_ring_ptr(hdr
, hdr
->head
);
442 ptr
->start
= hdr
->data_head
;
444 ptr
->timestamp
= timestamp
;
447 void *udebug_entry_append(struct udebug_buf
*buf
, const void *data
, uint32_t len
)
449 struct udebug_hdr
*hdr
= buf
->hdr
;
450 struct udebug_ptr
*ptr
;
457 ptr
= udebug_ring_ptr(hdr
, hdr
->head
);
458 ofs
= ptr
->start
+ ptr
->len
;
459 if (ptr
->len
+ len
> buf
->data_size
/ 2)
462 ret
= udebug_buf_alloc(buf
, ofs
, len
);
464 memcpy(ret
, data
, len
);
470 uint16_t udebug_entry_trim(struct udebug_buf
*buf
, uint16_t len
)
472 struct udebug_hdr
*hdr
= buf
->hdr
;
473 struct udebug_ptr
*ptr
= udebug_ring_ptr(hdr
, hdr
->head
);
481 void udebug_entry_set_length(struct udebug_buf
*buf
, uint16_t len
)
483 struct udebug_hdr
*hdr
= buf
->hdr
;
484 struct udebug_ptr
*ptr
= udebug_ring_ptr(hdr
, hdr
->head
);
/*
 * printf-style variant of udebug_entry_append().
 * Returns the number of bytes appended, or a negative value on failure.
 */
int udebug_entry_printf(struct udebug_buf *buf, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = udebug_entry_vprintf(buf, fmt, ap);
	va_end(ap);

	return ret;
}
501 int udebug_entry_vprintf(struct udebug_buf
*buf
, const char *fmt
, va_list ap
)
503 struct udebug_hdr
*hdr
= buf
->hdr
;
504 struct udebug_ptr
*ptr
;
512 ptr
= udebug_ring_ptr(hdr
, hdr
->head
);
513 ofs
= ptr
->start
+ ptr
->len
;
514 if (ptr
->len
> buf
->data_size
/ 2)
517 str
= udebug_buf_alloc(buf
, ofs
, UDEBUG_MIN_ALLOC_LEN
);
518 len
= vsnprintf(str
, UDEBUG_MIN_ALLOC_LEN
, fmt
, ap
);
519 if (len
<= UDEBUG_MIN_ALLOC_LEN
)
522 if (ptr
->len
+ len
> buf
->data_size
/ 2)
525 udebug_buf_alloc(buf
, ofs
, len
+ 1);
526 len
= vsnprintf(str
, len
, fmt
, ap
);
533 void udebug_entry_add(struct udebug_buf
*buf
)
535 struct udebug_hdr
*hdr
= buf
->hdr
;
536 struct udebug_ptr
*ptr
= udebug_ring_ptr(hdr
, hdr
->head
);
540 /* ensure strings are always 0-terminated */
541 data
= udebug_buf_ptr(buf
, ptr
->start
+ ptr
->len
);
543 hdr
->data_head
= ptr
->start
+ ptr
->len
+ 1;
545 /* ensure that all data changes are visible before advancing head */
546 __sync_synchronize();
548 u32_set(&hdr
->head
, u32_get(&hdr
->head
) + 1);
549 if (!u32_get(&hdr
->head
))
550 u32_set(&hdr
->head_hi
, u32_get(&hdr
->head_hi
) + 1);
552 /* ensure that head change is visible */
553 __sync_synchronize();
555 notify
= __atomic_exchange_n(&hdr
->notify
, 0, __ATOMIC_RELAXED
);
557 struct udebug_client_msg msg
= {
558 .type
= CL_MSG_RING_NOTIFY
,
560 .notify_mask
= notify
,
562 blob_buf_init(&b
, 0);
564 udebug_send_msg(buf
->ctx
, &msg
, b
.head
, -1);
567 void udebug_buf_free(struct udebug_buf
*buf
)
569 struct udebug
*ctx
= buf
->ctx
;
571 if (!list_empty(&buf
->list
) && buf
->list
.prev
)
572 list_del(&buf
->list
);
574 if (ctx
&& ctx
->fd
.fd
>= 0)
575 udebug_buf_msg(buf
, CL_MSG_RING_REMOVE
);
577 munmap(buf
->hdr
, buf
->head_size
+ 2 * buf
->data_size
);
579 memset(buf
, 0, sizeof(*buf
));
583 __udebug_buf_add(struct udebug
*ctx
, struct udebug_buf
*buf
)
585 struct udebug_client_msg msg
= {
586 .type
= CL_MSG_RING_ADD
,
588 .ring_size
= buf
->hdr
->ring_size
,
589 .data_size
= buf
->hdr
->data_size
,
591 const struct udebug_buf_meta
*meta
= buf
->meta
;
594 blob_buf_init(&b
, 0);
595 blobmsg_add_string(&b
, "name", meta
->name
);
596 c
= blobmsg_open_array(&b
, "flags");
597 for (size_t i
= 0; i
< meta
->n_flags
; i
++) {
598 const struct udebug_buf_flag
*flag
= &meta
->flags
[i
];
599 void *e
= blobmsg_open_array(&b
, NULL
);
600 blobmsg_add_string(&b
, NULL
, flag
->name
);
601 blobmsg_add_u64(&b
, NULL
, flag
->mask
);
602 blobmsg_close_array(&b
, e
);
604 blobmsg_close_array(&b
, c
);
606 udebug_send_msg(ctx
, &msg
, b
.head
, buf
->fd
);
609 int udebug_buf_add(struct udebug
*ctx
, struct udebug_buf
*buf
,
610 const struct udebug_buf_meta
*meta
)
612 list_add_tail(&buf
->list
, &ctx
->local_rings
);
615 buf
->id
= ctx
->next_id
++;
616 buf
->hdr
->format
= meta
->format
;
617 buf
->hdr
->sub_format
= meta
->sub_format
;
620 __udebug_buf_add(ctx
, buf
);
625 void udebug_init(struct udebug
*ctx
)
627 INIT_LIST_HEAD(&ctx
->local_rings
);
628 avl_init(&ctx
->remote_rings
, udebug_id_cmp
, true, NULL
);
630 ctx
->poll_handle
= -1;
633 static void udebug_reconnect_cb(struct uloop_timeout
*t
)
635 struct udebug
*ctx
= container_of(t
, struct udebug
, reconnect
);
637 if (udebug_connect(ctx
, ctx
->socket_path
) < 0) {
638 uloop_timeout_set(&ctx
->reconnect
, 1000);
642 udebug_add_uloop(ctx
);
645 void udebug_auto_connect(struct udebug
*ctx
, const char *path
)
647 free(ctx
->socket_path
);
648 ctx
->reconnect
.cb
= udebug_reconnect_cb
;
649 ctx
->socket_path
= path
? strdup(path
) : NULL
;
653 udebug_reconnect_cb(&ctx
->reconnect
);
656 int udebug_connect(struct udebug
*ctx
, const char *path
)
658 struct udebug_remote_buf
*rb
;
659 struct udebug_buf
*buf
;
666 path
= UDEBUG_SOCK_NAME
;
668 ctx
->fd
.fd
= usock(USOCK_UNIX
, path
, NULL
);
672 list_for_each_entry(buf
, &ctx
->local_rings
, list
)
673 __udebug_buf_add(ctx
, buf
);
675 avl_for_each_element(&ctx
->remote_rings
, rb
, node
) {
680 udebug_remote_buf_set_poll(ctx
, rb
, true);
687 udebug_recv_msg(struct udebug
*ctx
, struct udebug_client_msg
*msg
, int *fd
,
692 .iov_len
= sizeof(*msg
)
696 ret
= recv_retry(ctx
->fd
.fd
, &iov
, wait
, fd
);
698 __udebug_disconnect(ctx
, true);
700 return ret
== sizeof(*msg
);
703 struct udebug_client_msg
*__udebug_poll(struct udebug
*ctx
, int *fd
, bool wait
)
705 static struct udebug_client_msg msg
= {};
707 while (udebug_recv_msg(ctx
, &msg
, fd
, wait
)) {
708 struct udebug_remote_buf
*rb
;
711 if (msg
.type
!= CL_MSG_RING_NOTIFY
)
720 key
= (void *)(uintptr_t)msg
.id
;
721 rb
= avl_find_element(&ctx
->remote_rings
, key
, rb
, node
);
722 if (!rb
|| !rb
->poll
)
725 if (ctx
->poll_handle
>= 0)
726 __atomic_fetch_or(&rb
->buf
.hdr
->notify
,
727 1UL << ctx
->poll_handle
,
729 ctx
->notify_cb(ctx
, rb
);
735 void udebug_poll(struct udebug
*ctx
)
737 while (__udebug_poll(ctx
, NULL
, false));
740 static void udebug_fd_cb(struct uloop_fd
*fd
, unsigned int events
)
742 struct udebug
*ctx
= container_of(fd
, struct udebug
, fd
);
745 __udebug_disconnect(ctx
, true);
750 void udebug_add_uloop(struct udebug
*ctx
)
752 if (ctx
->fd
.registered
)
755 ctx
->fd
.cb
= udebug_fd_cb
;
756 uloop_fd_add(&ctx
->fd
, ULOOP_READ
);
759 void udebug_free(struct udebug
*ctx
)
761 struct udebug_remote_buf
*rb
, *tmp
;
762 struct udebug_buf
*buf
;
764 free(ctx
->socket_path
);
765 ctx
->socket_path
= NULL
;
767 __udebug_disconnect(ctx
, false);
768 uloop_timeout_cancel(&ctx
->reconnect
);
770 while (!list_empty(&ctx
->local_rings
)) {
771 buf
= list_first_entry(&ctx
->local_rings
, struct udebug_buf
, list
);
772 udebug_buf_free(buf
);
775 avl_for_each_element_safe(&ctx
->remote_rings
, rb
, node
, tmp
)
776 udebug_remote_buf_unmap(ctx
, rb
);