#include "pthread_impl.h"
#include "__lock.h"
weak_alias(__lock_fast, __lock);
weak_alias(__unlock_fast, __unlock);
weak_alias(__unlock_requeue_fast, __unlock_requeue);
/* __unlock_requeue handles the case where there is a second futex
   queue onto which a waiting thread should be moved when shared is
   false. This happens e.g. when l is the futex of a condition
   variable and r is the futex of a private mutex: for a private
   mutex it is cheaper to requeue the waiter directly onto the
   mutex's queue than to wake it only for it to block again
   immediately while trying to lock the mutex. */
void __unlock_requeue_slow(volatile int *l, volatile int *r, int shared)
{
	if (!shared) {
		/* Private case: move one waiter from l to r without waking it. */
		__syscall(SYS_futex, l, FUTEX_REQUEUE|__futex_private, 0, 1, r);
	} else {
		/* Shared case: requeueing across processes is not possible;
		   wake one waiter on l instead. */
		__wake(l, 1, 1);
	}
}
/* Slow path of lock acquisition, entered after the caller's fast-path
   CAS on *l failed.  The lock word encodes two things: the sign bit
   (INT_MIN) is set while the lock is held, and the low bits count the
   threads contending for the lock, including the holder.  "current"
   is the value of *l most recently observed by the caller. */
void __lock_slow(volatile int *l, int current)
{
	/* A first spin lock acquisition loop, for the case of
	   medium congestion. */
	for (unsigned i = 0; i < 10; ++i) {
		/* If the lock is held, renormalize "current" to the value *l
		   will have once the holder releases (clear the sign bit and
		   drop the holder from the count).  No signed overflow:
		   current - (INT_MIN + 1) == current + INT_MAX, which is in
		   range for any current < 0. */
		if (current < 0) current -= INT_MIN + 1;
		// assertion: current >= 0
		/* Try to acquire: set the lock bit and add ourselves to the
		   congestion count in a single CAS. */
		int val = a_cas(l, current, INT_MIN + (current + 1));
		if (val == current) return;
		current = val;
	}
	// Spinning failed, so mark ourselves as being inside the CS.
	current = a_fetch_add(l, 1) + 1;
	/* The main lock acquisition loop for heavy congestion. The only
	   change to the value performed inside that loop is a successful
	   lock via the CAS that acquires the lock. */
	for (;;) {
		/* We can only go into wait, if we know that somebody holds the
		   lock and will eventually wake us up, again. */
		if (current < 0) {
			__futexwait(l, current, 1);
			/* Woken (or the value changed): recompute the expected
			   post-release value as above. */
			current -= INT_MIN + 1;
		}
		/* assertion: current > 0, because the count
		   includes us already. */
		/* Acquire by setting the lock bit; our presence is already
		   accounted for in the count, so no +1 here. */
		int val = a_cas(l, current, INT_MIN + current);
		if (val == current) return;
		current = val;
	}
}