/*
 * udebug - debug ring buffer library
 *
 * Copyright (C) 2023 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* NOTE(review): this chunk is a partial extraction; several #include lines
 * are missing from this view, and the stray leading integers on each line
 * are line-number residue from the extraction. */
19 #include <sys/types.h>
21 #include <sys/socket.h>
29 #include "udebug-priv.h"
/* Round i up to the next multiple of sz (sz must be a power of two) */
32 #define ALIGN(i, sz) (((i) + (sz) - 1) & ~((sz) - 1))
/* Portability: some platforms (BSD/macOS) only define MAP_ANON */
35 #define MAP_ANONYMOUS MAP_ANON
/* Initial allocation size for printf-style entries (see udebug_entry_vprintf) */
38 #define UDEBUG_MIN_ALLOC_LEN 128
/* Scratch blob buffer reused by the message-encoding helpers in this file */
39 static struct blob_buf b;
41 static void __randname(char *template)
47 clock_gettime(CLOCK_REALTIME
, &ts
);
48 r
= ts
.tv_sec
+ ts
.tv_nsec
;
49 for (i
=0; i
<6; i
++, r
>>=5)
50 template[i
] = 'A'+(r
&15)+(r
&16)*2;
/*
 * AVL tree comparison callback for remote ring buffers.
 *
 * Ring IDs are stored directly in the key pointers as 32-bit integers
 * (no allocation), so compare the pointer values numerically.
 *
 * Returns <0, 0 or >0 in strcmp style. @ptr (the avl private pointer)
 * is unused.
 *
 * Fix: the extracted fragment had lost the braces and the comparison
 * statements; only the id1/id2 declarations were visible. Restored to
 * a valid three-way comparison consistent with its use in avl_init().
 */
int udebug_id_cmp(const void *k1, const void *k2, void *ptr)
{
	uint32_t id1 = (uint32_t)(uintptr_t)k1, id2 = (uint32_t)(uintptr_t)k2;

	if (id1 < id2)
		return -1;
	else if (id1 > id2)
		return 1;

	return 0;
}
/*
 * Create an anonymous shared-memory file descriptor.
 * @name must end in "XXXXXX"; the suffix is randomized, the object is
 * opened with O_EXCL and immediately shm_unlink()ed so it has no name.
 * NOTE(review): partial extraction — return type, braces, __randname()
 * call and error returns are missing from this view; stray leading
 * integers are line-number residue.
 */
64 shm_open_anon(char *name)
/* template points at the trailing "XXXXXX" placeholder */
66 char *template = name + strlen(name) - 6;
/* reject names that are too short or lack the placeholder */
69 if (template < name || memcmp(template, "XXXXXX", 6) != 0)
/* retry a bounded number of times on random-name collisions */
72 for (int i = 0; i < 100; i++) {
74 fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
/* unlink right away so the segment lives only as long as the fd */
76 if (shm_unlink(name) < 0) {
/* EEXIST just means the random name collided — retry; anything else is fatal */
83 if (fd < 0 && errno != EEXIST)
/*
 * Tear down the connection to udebugd: remove the socket from uloop,
 * invalidate the poll handle and, if a reconnect callback is set and
 * @reconnect is true, schedule an almost-immediate reconnect attempt.
 * NOTE(review): partial extraction — braces and the statements that
 * close/reset ctx->fd are missing from this view.
 */
90 static void __udebug_disconnect(struct udebug *ctx, bool reconnect)
92 uloop_fd_delete(&ctx->fd);
95 ctx->poll_handle = -1;
/* only auto-reconnect when udebug_auto_connect() installed the timer cb */
96 if (ctx->reconnect.cb && reconnect)
97 uloop_timeout_set(&ctx->reconnect, 1);
/*
 * Return the current wall-clock time as a udebug timestamp
 * (seconds scaled by UDEBUG_TS_SEC plus microseconds).
 * NOTE(review): partial extraction — the declaration/initialization of
 * `val` from ts.tv_sec and the final return are missing from this view.
 */
100 uint64_t udebug_timestamp(void)
105 clock_gettime(CLOCK_REALTIME, &ts);
108 val *= UDEBUG_TS_SEC;
/* nanoseconds -> microseconds */
109 val += ts.tv_nsec / 1000;
/*
 * Map a ring buffer's shared memory. The data area is mapped TWICE,
 * back to back, behind a PROT_NONE reservation: entries that wrap at
 * the end of the ring remain contiguous in virtual memory, so readers
 * and writers never have to split an access.
 * NOTE(review): partial extraction — return type, braces, error
 * returns, the offset argument of the second data mapping and the
 * assignment of buf->hdr are missing from this view.
 */
115 __udebug_buf_map(struct udebug_buf *buf)
/* reserve the full window (header + data twice) without access rights */
119 ptr = mmap(NULL, buf->head_size + 2 * buf->data_size, PROT_NONE,
120 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
121 if (ptr == MAP_FAILED)
/* first mapping: header + first copy of the data area */
124 ptr2 = mmap(ptr, buf->head_size + buf->data_size,
125 PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, buf->fd, 0);
/* second mapping: the same data pages again, directly after the first */
129 ptr2 = mmap(ptr + buf->head_size + buf->data_size, buf->data_size,
130 PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, buf->fd,
132 if (ptr2 != ptr + buf->head_size + buf->data_size)
136 buf->data = ptr + buf->head_size;
/* error path: release the whole reservation */
140 munmap(ptr, buf->head_size + 2 * buf->data_size);
/*
 * Send an iovec over a unix socket with sendmsg(), retrying on partial
 * writes/EINTR/EAGAIN, and pass @sock_fd along as SCM_RIGHTS ancillary
 * data on the first message (when sock_fd >= 0).
 * NOTE(review): partial extraction — local declarations, the sendmsg
 * retry loop scaffolding, the EAGAIN poll()/switch handling and the
 * returns are missing from this view.
 */
145 writev_retry(int fd, struct iovec *iov, int iov_len, int sock_fd)
/* ancillary buffer sized for exactly one file descriptor */
147 uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
148 struct msghdr msghdr = { 0 };
149 struct cmsghdr *cmsg;
153 msghdr.msg_iov = iov,
154 msghdr.msg_iovlen = iov_len,
155 msghdr.msg_control = fd_buf;
156 msghdr.msg_controllen = sizeof(fd_buf);
/* set up the SCM_RIGHTS control message for fd passing */
158 cmsg = CMSG_FIRSTHDR(&msghdr);
159 cmsg->cmsg_type = SCM_RIGHTS;
160 cmsg->cmsg_level = SOL_SOCKET;
161 cmsg->cmsg_len = CMSG_LEN(sizeof(int));
/* pfd points into the control buffer where the fd payload goes */
163 pfd = (int *) CMSG_DATA(cmsg);
164 msghdr.msg_controllen = cmsg->cmsg_len;
/* no fd to pass (or already passed): drop the control data */
170 msghdr.msg_control = NULL;
171 msghdr.msg_controllen = 0;
176 cur_len = sendmsg(fd, &msghdr, 0);
/* NOTE(review): this pollfd shadows the int pointer `pfd` above —
 * presumably used to wait for POLLOUT on EAGAIN; confirm in full source */
178 struct pollfd pfd = {
/* consume fully-written iovec entries ... */
199 while (cur_len >= (ssize_t) iov->iov_len) {
200 cur_len -= iov->iov_len;
/* ... then adjust the partially-written head entry and resend the rest */
206 iov->iov_base += cur_len;
207 iov->iov_len -= cur_len;
208 msghdr.msg_iov = iov;
209 msghdr.msg_iovlen = iov_len;
212 /* Should never reach here */
/*
 * Receive exactly iov->iov_len bytes from a unix socket, optionally
 * blocking (@wait), and capture any SCM_RIGHTS file descriptor into
 * *@recv_fd when the caller asked for one.
 * NOTE(review): partial extraction — local declarations, the recvmsg
 * error/EOF handling, the cmsg fd extraction and the returns are
 * missing from this view.
 */
217 recv_retry(int fd, struct iovec *iov, bool wait, int *recv_fd)
219 uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
220 struct msghdr msghdr = { 0 };
221 struct cmsghdr *cmsg;
226 msghdr.msg_iov = iov,
227 msghdr.msg_iovlen = 1,
228 msghdr.msg_control = fd_buf;
229 msghdr.msg_controllen = sizeof(fd_buf);
/* prepare an SCM_RIGHTS slot in case a descriptor is attached */
231 cmsg = CMSG_FIRSTHDR(&msghdr);
232 cmsg->cmsg_type = SCM_RIGHTS;
233 cmsg->cmsg_level = SOL_SOCKET;
234 cmsg->cmsg_len = CMSG_LEN(sizeof(int));
236 pfd = (int *) CMSG_DATA(cmsg);
/* loop until the requested byte count has been received */
238 while (iov->iov_len > 0) {
/* expect a descriptor only while recv_fd is still wanted */
240 msghdr.msg_control = fd_buf;
241 msghdr.msg_controllen = cmsg->cmsg_len;
243 msghdr.msg_control = NULL;
244 msghdr.msg_controllen = 0;
248 bytes = recvmsg(fd, &msghdr, 0);
/* advance past the bytes received so far */
271 iov->iov_len -= bytes;
272 iov->iov_base += bytes;
/* short read: wait (with timeout) for more data before retrying */
275 if (iov->iov_len > 0) {
/* NOTE(review): shadows the int pointer `pfd` above */
276 struct pollfd pfd = {
282 ret = poll(&pfd, 1, UDEBUG_TIMEOUT);
283 } while (ret < 0 && errno == EINTR);
/* no data became readable within the timeout — bail out */
285 if (!(pfd.revents & POLLIN))
/*
 * Send a client message to udebugd, followed by optional blobmsg
 * metadata (@meta), optionally passing @fd via SCM_RIGHTS.
 * NOTE(review): partial extraction — return type, braces, the second
 * iovec initializer and the fallback path when @meta is NULL (which
 * presumably uses the scratch buffer `b`) are missing from this view.
 */
294 udebug_send_msg(struct udebug *ctx, struct udebug_client_msg *msg,
295 struct blob_attr *meta, int fd)
/* iov[0] = fixed-size message header, iov[1] = metadata blob */
297 struct iovec iov[2] = {
298 { .iov_base = msg, .iov_len = sizeof(*msg) },
/* meta not provided: reset the shared scratch blob buffer */
303 blob_buf_init(&b, 0);
307 iov[1].iov_base = meta;
/* blob_pad_len: total attribute length including padding */
308 iov[1].iov_len = blob_pad_len(meta);
309 writev_retry(ctx->fd.fd, iov, ARRAY_SIZE(iov), fd);
/*
 * Receive one fixed-size client message from udebugd into @msg,
 * capturing a passed fd into *@fd if requested. On a short or failed
 * read the connection is considered dead and a reconnect is scheduled.
 * Returns true when a complete message was received.
 * NOTE(review): partial extraction — the `wait` parameter, the iovec
 * declaration and the error check preceding the disconnect are missing
 * from this view.
 */
313 udebug_recv_msg(struct udebug *ctx, struct udebug_client_msg *msg, int *fd,
318 .iov_len = sizeof(*msg)
322 ret = recv_retry(ctx->fd.fd, &iov, wait, fd);
/* receive failure: drop the connection and let the timer reconnect */
324 __udebug_disconnect(ctx, true);
326 return ret == sizeof(*msg);
/*
 * Drain incoming messages from udebugd. CL_MSG_RING_NOTIFY messages
 * are dispatched to the registered per-ring notify callback; any other
 * message type is returned to the caller (static storage — valid until
 * the next call). @wait selects blocking vs. non-blocking receive.
 * NOTE(review): partial extraction — braces, the key declaration, the
 * continue/return paths and the memory-order argument of the atomic OR
 * are missing from this view.
 */
329 static struct udebug_client_msg *
330 __udebug_poll(struct udebug *ctx, int *fd, bool wait)
/* static: the returned message must be consumed before the next poll */
332 static struct udebug_client_msg msg = {};
334 while (udebug_recv_msg(ctx, &msg, fd, wait)) {
335 struct udebug_remote_buf *rb;
/* anything that is not a ring notification is handed to the caller */
338 if (msg.type != CL_MSG_RING_NOTIFY)
/* ring id doubles as the AVL key (see udebug_id_cmp) */
347 key = (void *)(uintptr_t)msg.id;
348 rb = avl_find_element(&ctx->remote_rings, key, rb, node);
349 if (!rb || !rb->poll)
/* re-arm our notification bit in the ring header before dispatch */
352 if (ctx->poll_handle >= 0)
353 __atomic_fetch_or(&rb->buf.hdr->notify,
354 1UL << ctx->poll_handle,
356 ctx->notify_cb(ctx, rb);
/*
 * Block until a response with the same message type as @msg arrives,
 * letting __udebug_poll() dispatch unrelated notifications meanwhile.
 * NOTE(review): partial extraction — braces, the local fd handling,
 * the rfd passthrough and the final return are missing from this view.
 */
362 static struct udebug_client_msg *
363 udebug_wait_for_response(struct udebug *ctx, struct udebug_client_msg *msg, int *rfd)
/* remember which reply type terminates the wait */
365 int type = msg->type;
372 msg = __udebug_poll(ctx, &fd, true);
/* NULL msg means the connection dropped while waiting */
373 } while (msg && msg->type != type);
/*
 * Send a simple per-ring request of @type to udebugd and wait for the
 * matching response.
 * NOTE(review): partial extraction — return type, braces and the
 * message initializer fields (presumably .type = type and the ring id)
 * are missing from this view.
 */
386 udebug_buf_msg(struct udebug_buf *buf, enum udebug_client_msg_type type)
388 struct udebug_client_msg msg = {
393 udebug_send_msg(buf->ctx, &msg, NULL, -1);
394 udebug_wait_for_response(buf->ctx, &msg, NULL);
397 static size_t __udebug_headsize(unsigned int ring_size
, unsigned int page_size
)
399 ring_size
*= sizeof(struct udebug_ptr
);
400 return ALIGN(sizeof(struct udebug_hdr
) + ring_size
, page_size
);
/*
 * Attach to an existing ring buffer via an fd received from udebugd.
 * Validates the requested geometry against sane limits and against the
 * geometry recorded in the mapped header; unmaps and fails on mismatch.
 * NOTE(review): partial extraction — braces, the fd assignment and the
 * return statements are missing from this view.
 */
403 int udebug_buf_open(struct udebug_buf *buf, int fd, uint32_t ring_size, uint32_t data_size)
405 INIT_LIST_HEAD(&buf->list);
407 buf->ring_size = ring_size;
408 buf->head_size = __udebug_headsize(ring_size, sysconf(_SC_PAGESIZE));
409 buf->data_size = data_size;
/* hard limits: 2^24 entries, 512 MiB of data */
411 if (buf->ring_size > (1U << 24) || buf->data_size > (1U << 29))
414 if (__udebug_buf_map(buf))
/* the creator's geometry (in the shared header) must match ours */
417 if (buf->ring_size != buf->hdr->ring_size ||
418 buf->data_size != buf->hdr->data_size) {
419 munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
/*
 * Create a new local ring buffer: round @size and @entries up to powers
 * of two, allocate an anonymous shared-memory segment, map it and
 * publish the geometry in the shared header.
 * NOTE(review): partial extraction — braces, the order/ring_order
 * increments inside the rounding loops, error returns and fd cleanup
 * are missing from this view.
 */
427 int udebug_buf_init(struct udebug_buf *buf, size_t entries, size_t size)
429 uint32_t pagesz = sysconf(_SC_PAGESIZE);
430 char filename[] = "/udebug.XXXXXX";
/* minimum data size: 2^12 = one 4k page */
431 unsigned int order = 12;
/* minimum ring size: 2^5 = 32 entries */
432 uint8_t ring_order = 5;
436 INIT_LIST_HEAD(&buf->list);
/* round the data size up to a power of two */
439 while(size > 1U << order)
/* round the entry count up to a power of two */
442 while (entries > 1U << ring_order)
444 entries = 1 << ring_order;
/* same hard limits as udebug_buf_open() */
446 if (size > (1U << 29) || entries > (1U << 24))
449 head_size = __udebug_headsize(entries, pagesz);
/* grow the ring while doubling the entry count still fits the header area */
450 while (ALIGN(sizeof(*buf->hdr) + (entries * 2) * sizeof(struct udebug_ptr), pagesz) == head_size)
453 fd = shm_open_anon(filename);
457 if (ftruncate(fd, head_size + size) < 0)
460 buf->head_size = head_size;
461 buf->data_size = size;
462 buf->ring_size = entries;
464 if (__udebug_buf_map(buf))
468 buf->hdr->ring_size = entries;
469 buf->hdr->data_size = size;
471 /* ensure hdr changes are visible */
472 __sync_synchronize();
/*
 * Reserve @len bytes (plus a terminating byte) at ring-data offset
 * @ofs and return a writable pointer to it. data_used tracks the
 * high-water mark so readers know how much of the area is valid.
 * NOTE(review): partial extraction — braces are missing from this view.
 */
481 static void *udebug_buf_alloc(struct udebug_buf *buf, uint32_t ofs, uint32_t len)
483 struct udebug_hdr *hdr = buf->hdr;
/* +1 keeps room for the NUL terminator written by udebug_entry_add() */
485 hdr->data_used = u32_max(hdr->data_used, ofs + len + 1);
487 /* ensure that data_used update is visible before clobbering data */
488 __sync_synchronize();
490 return udebug_buf_ptr(buf, ofs);
/*
 * Read the 64-bit runtime flag word from the shared ring header.
 * Flags are stored as uintptr_t-sized words so they can be updated
 * atomically on 32-bit systems: on such systems the upper half lives
 * in flags[1] and is merged in here.
 * NOTE(review): partial extraction — braces, the flags declaration and
 * the return are missing from this view.
 */
493 uint64_t udebug_buf_flags(struct udebug_buf *buf)
495 struct udebug_hdr *hdr = buf->hdr;
501 flags = hdr->flags[0];
/* 32-bit platform: combine the two halves */
502 if (sizeof(flags) != sizeof(uintptr_t))
503 flags |= ((uint64_t)hdr->flags[1]) << 32;
/*
 * Begin a new entry at the current ring head with the given
 * @timestamp: its data starts at the current data_head and its length
 * starts at zero (the zeroing statement is outside this view).
 * NOTE(review): partial extraction — braces and the header NULL check
 * are missing from this view.
 */
508 void udebug_entry_init_ts(struct udebug_buf *buf, uint64_t timestamp)
510 struct udebug_hdr *hdr = buf->hdr;
511 struct udebug_ptr *ptr;
/* descriptor slot for the entry currently being written */
516 ptr = udebug_ring_ptr(hdr, hdr->head);
517 ptr->start = hdr->data_head;
519 ptr->timestamp = timestamp;
/*
 * Append @len bytes to the entry currently being composed and return a
 * pointer to the appended region (so callers may also fill it in
 * place; @data may presumably be NULL for that use — confirm against
 * callers). Entries are limited to half the data area so the reader
 * can always distinguish old from new data.
 * NOTE(review): partial extraction — braces, the ofs/ret declarations,
 * the ptr->len update and the returns are missing from this view.
 */
522 void *udebug_entry_append(struct udebug_buf *buf, const void *data, uint32_t len)
524 struct udebug_hdr *hdr = buf->hdr;
525 struct udebug_ptr *ptr;
532 ptr = udebug_ring_ptr(hdr, hdr->head);
/* append position: end of the data written so far */
533 ofs = ptr->start + ptr->len;
/* cap entries at half the ring data area */
534 if (ptr->len + len > buf->data_size / 2)
537 ret = udebug_buf_alloc(buf, ofs, len);
539 memcpy(ret, data, len);
/*
 * Shrink the entry currently being composed to @len bytes (the
 * truncation logic and returned previous/new length are outside this
 * view — confirm exact contract against the full source).
 * NOTE(review): partial extraction — braces, validation and the return
 * are missing from this view.
 */
545 uint16_t udebug_entry_trim(struct udebug_buf *buf, uint16_t len)
547 struct udebug_hdr *hdr = buf->hdr;
548 struct udebug_ptr *ptr;
553 ptr = udebug_ring_ptr(hdr, hdr->head);
/*
 * Set the length of the entry currently being composed to @len
 * (the assignment itself is outside this view).
 * NOTE(review): partial extraction — braces, the header NULL check and
 * the ptr->len assignment are missing from this view.
 */
560 void udebug_entry_set_length(struct udebug_buf *buf, uint16_t len)
562 struct udebug_hdr *hdr = buf->hdr;
563 struct udebug_ptr *ptr;
568 ptr = udebug_ring_ptr(hdr, hdr->head);
/*
 * printf-style convenience wrapper around udebug_entry_vprintf().
 * Returns its result.
 * NOTE(review): partial extraction — braces, va_start/va_end and the
 * return are missing from this view.
 */
572 int udebug_entry_printf(struct udebug_buf *buf, const char *fmt, ...)
578 ret = udebug_entry_vprintf(buf, fmt, ap);
/*
 * Append formatted text to the entry currently being composed.
 * First formats into a UDEBUG_MIN_ALLOC_LEN reservation; if the output
 * did not fit, re-allocates the exact size and formats again (a second
 * va_list copy, outside this view, is presumably used for the retry).
 * NOTE(review): partial extraction — braces, declarations, the size
 * check between the two attempts and the returns are missing.
 */
584 int udebug_entry_vprintf(struct udebug_buf *buf, const char *fmt, va_list ap)
586 struct udebug_hdr *hdr = buf->hdr;
587 struct udebug_ptr *ptr;
595 ptr = udebug_ring_ptr(hdr, hdr->head);
596 ofs = ptr->start + ptr->len;
/* entry already at the half-ring limit */
597 if (ptr->len > buf->data_size / 2)
/* optimistic first attempt with the minimum reservation */
600 str = udebug_buf_alloc(buf, ofs, UDEBUG_MIN_ALLOC_LEN);
601 len = vsnprintf(str, UDEBUG_MIN_ALLOC_LEN, fmt, ap);
/* NOTE(review): `<=` accepts len == UDEBUG_MIN_ALLOC_LEN, which
 * vsnprintf reports for truncated output — confirm `<` wasn't meant */
602 if (len <= UDEBUG_MIN_ALLOC_LEN)
605 if (ptr->len + len > buf->data_size / 2)
/* exact-size reservation (+1 for the NUL) and second attempt */
608 udebug_buf_alloc(buf, ofs, len + 1);
/* NOTE(review): size `len` excludes room for the terminating NUL —
 * confirm whether `len + 1` was intended here */
609 len = vsnprintf(str, len, fmt, ap);
/*
 * Commit the entry currently being composed: NUL-terminate its data,
 * advance data_head and the 64-bit head counter (head/head_hi pair)
 * with memory barriers so readers always see consistent data, then
 * notify udebugd of any subscribed pollers.
 * NOTE(review): partial extraction — braces, the *data = 0 store, the
 * notify-zero early return and parts of the notify message initializer
 * are missing from this view.
 */
616 void udebug_entry_add(struct udebug_buf *buf)
618 struct udebug_hdr *hdr = buf->hdr;
619 struct udebug_ptr *ptr;
626 ptr = udebug_ring_ptr(hdr, hdr->head);
628 /* ensure strings are always 0-terminated */
629 data = udebug_buf_ptr(buf, ptr->start + ptr->len);
/* next entry's data starts right after this entry's terminator */
631 hdr->data_head = ptr->start + ptr->len + 1;
633 /* ensure that all data changes are visible before advancing head */
634 __sync_synchronize();
/* 64-bit head split into two u32 words; carry into head_hi on wrap */
636 u32_set(&hdr->head, u32_get(&hdr->head) + 1);
637 if (!u32_get(&hdr->head))
638 u32_set(&hdr->head_hi, u32_get(&hdr->head_hi) + 1);
640 /* ensure that head change is visible */
641 __sync_synchronize();
/* fetch-and-clear the poller mask; notify the daemon if any bits set */
643 notify = __atomic_exchange_n(&hdr->notify, 0, __ATOMIC_RELAXED);
645 struct udebug_client_msg msg = {
646 .type = CL_MSG_RING_NOTIFY,
648 .notify_mask = notify,
650 blob_buf_init(&b, 0);
652 udebug_send_msg(buf->ctx, &msg, b.head, -1);
/*
 * Destroy a local ring buffer: unlink it from the context's list, ask
 * udebugd to remove it (if still connected), unmap the double mapping
 * and clear the structure.
 * NOTE(review): partial extraction — braces and the fd close are
 * missing from this view.
 */
655 void udebug_buf_free(struct udebug_buf *buf)
657 struct udebug *ctx = buf->ctx;
/* list.prev guard: tolerate a zero-initialized, never-linked buf */
659 if (!list_empty(&buf->list) && buf->list.prev)
660 list_del(&buf->list);
662 if (ctx && ctx->fd.fd >= 0)
663 udebug_buf_msg(buf, CL_MSG_RING_REMOVE);
665 munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
/* leaves buf safe to reuse or free */
667 memset(buf, 0, sizeof(*buf));
/*
 * Announce a local ring buffer to udebugd: send CL_MSG_RING_ADD with
 * the ring geometry, a blobmsg describing its name and flags, and the
 * shared-memory fd via SCM_RIGHTS; then wait for the acknowledgement.
 * NOTE(review): partial extraction — return type, braces, the .id
 * initializer, the `c` declaration and the return are missing from
 * this view.
 */
671 __udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf)
673 struct udebug_client_msg msg = {
674 .type = CL_MSG_RING_ADD,
676 .ring_size = buf->hdr->ring_size,
677 .data_size = buf->hdr->data_size,
679 const struct udebug_buf_meta *meta = buf->meta;
/* encode metadata: { "name": ..., "flags": [ [name, mask], ... ] } */
682 blob_buf_init(&b, 0);
683 blobmsg_add_string(&b, "name", meta->name);
684 c = blobmsg_open_array(&b, "flags");
685 for (size_t i = 0; i < meta->n_flags; i++) {
686 const struct udebug_buf_flag *flag = &meta->flags[i];
687 void *e = blobmsg_open_array(&b, NULL);
688 blobmsg_add_string(&b, NULL, flag->name);
689 blobmsg_add_u64(&b, NULL, flag->mask);
690 blobmsg_close_array(&b, e);
692 blobmsg_close_array(&b, c);
/* buf->fd is duplicated into the daemon via ancillary data */
694 udebug_send_msg(ctx, &msg, b.head, buf->fd);
695 udebug_wait_for_response(ctx, &msg, NULL);
/*
 * Register a ring buffer with the context: link it into local_rings,
 * assign the next local id, record format information from @meta in
 * the shared header and, when connected, announce it to udebugd.
 * NOTE(review): partial extraction — braces, validation, the
 * buf->ctx/buf->meta assignments and the return are missing from this
 * view.
 */
698 int udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf,
699 const struct udebug_buf_meta *meta)
704 list_add_tail(&buf->list, &ctx->local_rings);
707 buf->id = ctx->next_id++;
708 buf->hdr->format = meta->format;
709 buf->hdr->sub_format = meta->sub_format;
712 __udebug_buf_add(ctx, buf);
/*
 * Initialize a udebug context: empty local ring list, remote ring AVL
 * tree keyed by id, and an invalid poll handle.
 * NOTE(review): partial extraction — braces and the ctx->fd.fd = -1
 * style socket reset appear to be missing from this view.
 */
717 void udebug_init(struct udebug *ctx)
719 INIT_LIST_HEAD(&ctx->local_rings);
/* allow_dups=true: remote rings are looked up by integer id */
720 avl_init(&ctx->remote_rings, udebug_id_cmp, true, NULL);
722 ctx->poll_handle = -1;
/*
 * Reconnect timer callback: try to re-establish the daemon connection;
 * on failure re-arm the timer with a 1s backoff, on success register
 * the socket with uloop again.
 * NOTE(review): partial extraction — braces and the early return on
 * failure are missing from this view.
 */
725 static void udebug_reconnect_cb(struct uloop_timeout *t)
727 struct udebug *ctx = container_of(t, struct udebug, reconnect);
729 if (udebug_connect(ctx, ctx->socket_path) < 0) {
/* retry in one second */
730 uloop_timeout_set(&ctx->reconnect, 1000);
734 udebug_add_uloop(ctx);
/*
 * Enable automatic (re)connection to udebugd at @path: install the
 * reconnect callback, remember the path (NULL selects the default
 * socket) and kick off the first connection attempt immediately.
 * NOTE(review): partial extraction — braces and the already-connected
 * early return are missing from this view.
 */
737 void udebug_auto_connect(struct udebug *ctx, const char *path)
/* release any previously stored path before replacing it */
739 free(ctx->socket_path);
740 ctx->reconnect.cb = udebug_reconnect_cb;
741 ctx->socket_path = path ? strdup(path) : NULL;
/* first attempt runs synchronously via the timer callback */
745 udebug_reconnect_cb(&ctx->reconnect);
/*
 * Connect to udebugd at @path (default UDEBUG_SOCK_NAME when NULL),
 * then re-announce all local rings and re-enable polling on all
 * remote rings that were being watched before a reconnect.
 * NOTE(review): partial extraction — braces, the connect error check
 * and the return are missing from this view.
 */
748 int udebug_connect(struct udebug *ctx, const char *path)
750 struct udebug_remote_buf *rb;
751 struct udebug_buf *buf;
758 path = UDEBUG_SOCK_NAME;
760 ctx->fd.fd = usock(USOCK_UNIX, path, NULL);
/* replay local ring registrations on the fresh connection */
764 list_for_each_entry(buf, &ctx->local_rings, list)
765 __udebug_buf_add(ctx, buf);
/* re-subscribe remote rings that had polling enabled */
767 avl_for_each_element(&ctx->remote_rings, rb, node) {
772 udebug_remote_buf_set_poll(ctx, rb, true);
778 void udebug_poll(struct udebug
*ctx
)
780 while (__udebug_poll(ctx
, NULL
, false));
783 struct udebug_client_msg
*
784 udebug_send_and_wait(struct udebug
*ctx
, struct udebug_client_msg
*msg
, int *rfd
)
786 udebug_send_msg(ctx
, msg
, NULL
, -1);
788 return udebug_wait_for_response(ctx
, msg
, rfd
);
/*
 * uloop callback for the daemon socket. Tears the connection down and
 * schedules a reconnect.
 * NOTE(review): partial extraction — braces and the error/EOF check
 * that presumably gates the disconnect are missing from this view.
 */
791 static void udebug_fd_cb(struct uloop_fd *fd, unsigned int events)
793 struct udebug *ctx = container_of(fd, struct udebug, fd);
796 __udebug_disconnect(ctx, true);
801 void udebug_add_uloop(struct udebug
*ctx
)
803 if (ctx
->fd
.registered
)
806 ctx
->fd
.cb
= udebug_fd_cb
;
807 uloop_fd_add(&ctx
->fd
, ULOOP_READ
);
/*
 * Release all resources held by a context: forget the stored socket
 * path, disconnect without scheduling a reconnect, cancel the timer,
 * free every local ring and unmap every remote ring.
 * NOTE(review): partial extraction — braces are missing from this
 * view.
 */
810 void udebug_free(struct udebug *ctx)
812 struct udebug_remote_buf *rb, *tmp;
813 struct udebug_buf *buf;
815 free(ctx->socket_path);
816 ctx->socket_path = NULL;
/* reconnect=false: do not re-arm the timer while tearing down */
818 __udebug_disconnect(ctx, false);
819 uloop_timeout_cancel(&ctx->reconnect);
/* udebug_buf_free() unlinks each buf, so keep taking the first */
821 while (!list_empty(&ctx->local_rings)) {
822 buf = list_first_entry(&ctx->local_rings, struct udebug_buf, list);
823 udebug_buf_free(buf);
/* _safe variant: unmap removes nodes while iterating */
826 avl_for_each_element_safe(&ctx->remote_rings, rb, node, tmp)
827 udebug_remote_buf_unmap(ctx, rb);