toolchain/musl/patches/500-0004-restore-lock-skipping-for-processes-that-return-to-s.patch
From 8d81ba8c0bc6fe31136cb15c9c82ef4c24965040 Mon Sep 17 00:00:00 2001
From: Rich Felker <dalias@aerifal.cx>
Date: Fri, 22 May 2020 17:45:47 -0400
Subject: [PATCH 4/4] restore lock-skipping for processes that return to
 single-threaded state

the design used here relies on the barrier provided by the first lock
operation after the process returns to single-threaded state to
synchronize with actions by the last thread that exited. by storing
the intent to change modes in the same object used to detect whether
locking is needed, it's possible to avoid an extra (possibly costly)
memory load after the lock is taken.
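
The flag this patch adds is tri-state: 1 means other threads exist and
locking is required; -1 means the process has just returned to
single-threaded state, so the next lock acquisition must still happen
because its barrier synchronizes with the exited thread's stores; 0 means
locking can be skipped outright. A minimal self-contained sketch of that
protocol, using C11 atomics in place of musl's a_swap/__wait primitives
(all names below are illustrative, not musl identifiers):

	#include <stdatomic.h>

	/* set to 1 by thread creation, to -1 by the last thread's exit */
	static volatile signed char need_locks; /* 1, -1 or 0, as above */
	static atomic_int lk;                   /* 0 = free, 1 = held */

	static void lock(void)
	{
		int nl = need_locks;  /* one load decides both questions */
		if (!nl) return;      /* known single-threaded: skip */
		while (atomic_exchange(&lk, 1))
			;             /* musl futex-waits here instead of spinning */
		/* the exchange is a full barrier, so it synchronizes with the
		 * last exiting thread's stores; only now may the flag drop */
		if (nl < 0) need_locks = 0;
	}

	static void unlock(void)
	{
		/* release only if lock() really took the lock, mirroring
		 * musl's lk[0] test, so a skipped lock() is matched safely */
		if (atomic_load(&lk)) atomic_store(&lk, 0);
	}

Keeping the -1 in the same byte that gates the fast path is what saves the
extra memory load mentioned above: the single need_locks read decides both
whether to lock and whether this acquisition is the last one needed.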
---
 src/internal/libc.h         | 1 +
 src/malloc/malloc.c         | 5 ++++-
 src/thread/__lock.c         | 4 +++-
 src/thread/pthread_create.c | 8 ++++----
 4 files changed, 12 insertions(+), 6 deletions(-)

--- a/src/internal/libc.h
+++ b/src/internal/libc.h
@@ -21,6 +21,7 @@ struct __libc {
 	char can_do_threads;
 	char threaded;
 	char secure;
+	volatile signed char need_locks;
 	int threads_minus_1;
 	size_t *auxv;
 	struct tls_module *tls_head;
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -26,8 +26,11 @@ int __malloc_replaced;
 
 static inline void lock(volatile int *lk)
 {
-	if (libc.threaded)
+	int need_locks = libc.need_locks;
+	if (need_locks) {
 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+		if (need_locks < 0) libc.need_locks = 0;
+	}
 }
 
 static inline void unlock(volatile int *lk)
--- a/src/thread/__lock.c
+++ b/src/thread/__lock.c
@@ -18,9 +18,11 @@
 
 void __lock(volatile int *l)
 {
-	if (!libc.threaded) return;
+	int need_locks = libc.need_locks;
+	if (!need_locks) return;
 	/* fast path: INT_MIN for the lock, +1 for the congestion */
 	int current = a_cas(l, 0, INT_MIN + 1);
+	if (need_locks < 0) libc.need_locks = 0;
 	if (!current) return;
 	/* A first spin loop, for medium congestion. */
 	for (unsigned i = 0; i < 10; ++i) {
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -118,8 +118,8 @@ _Noreturn void __pthread_exit(void *resu
 	 * until the lock is released, which only happens after SYS_exit
 	 * has been called, via the exit futex address pointing at the lock.
 	 * This needs to happen after any possible calls to LOCK() that might
-	 * skip locking if libc.threads_minus_1 is zero. */
-	libc.threads_minus_1--;
+	 * skip locking if process appears single-threaded. */
+	if (!--libc.threads_minus_1) libc.need_locks = -1;
 	self->next->prev = self->prev;
 	self->prev->next = self->next;
 	self->prev = self->next = self;
@@ -339,7 +339,7 @@ int __pthread_create(pthread_t *restrict
 		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
 
 	__tl_lock();
-	libc.threads_minus_1++;
+	if (!libc.threads_minus_1++) libc.need_locks = 1;
 	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
 
 	/* All clone failures translate to EAGAIN. If explicit scheduling
@@ -363,7 +363,7 @@ int __pthread_create(pthread_t *restrict
 		new->next->prev = new;
 		new->prev->next = new;
 	} else {
-		libc.threads_minus_1--;
+		if (!--libc.threads_minus_1) libc.need_locks = 0;
 	}
 	__tl_unlock();
 	__restore_sigs(&set);
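
Seen from an application, the transitions wired up above behave as follows
(a hypothetical demonstration program, not part of the patch; the comments
track libc.need_locks):

	#include <pthread.h>
	#include <stdlib.h>

	static void *worker(void *arg) { (void)arg; free(malloc(64)); return 0; }

	int main(void)
	{
		pthread_t t;
		pthread_create(&t, 0, worker, 0); /* need_locks: 0 -> 1 */
		pthread_join(t, 0);               /* last thread exit: 1 -> -1 */
		free(malloc(64)); /* first lock afterwards: barrier, then -> 0 */
		free(malloc(64)); /* from here on the malloc lock is skipped */
		return 0;
	}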