#include "pthread_impl.h"
#include <threads.h>
/* Wake all threads waiting on the internal condition object c.
 *
 * c is an internal __cnd_t that has already been unlinked from its
 * owning cnd_t by the caller, so no new waiter can ever attach to it.
 * Returns thrd_success, or thrd_error if a futex call failed. */
int __cnd_broadcast(__cnd_t *c)
{
int ret = 0;
__mtx_t * mtx = c->mtx;
/* If the mtx isn't set, this __cnd has never been used for a wait,
nothing to do. */
if (mtx) {
/* We may publish as many wake tokens as we like here: this __cnd_t
is already unlinked from its cnd_t, so it will never be seen by
any new waiter. */
a_store(&c->tok, INT_MAX);
/* Decide whether we must wake one waiter on the mutex word
ourselves: do so when the mutex is of the plain type (_mt_typ == 0)
or when the lock word's owner tid is not ours — presumably meaning
the calling thread does not hold the mutex, so no unlock of ours
will wake the requeued waiters.  NOTE(review): the exact _mt_typ /
_mt_lck encodings are defined elsewhere — confirm against the
mutex implementation. */
int wakeup = !mtx->_mt_typ || (mtx->_mt_lck&INT_MAX)!=__pthread_self()->tid;
/* Requeue: wake 0 waiters on c->tok and move up to INT_MAX of them
onto the mutex futex word, so they contend for the mutex instead
of stampeding here.  ret is the kernel's count of transferred
(woken/requeued) waiters, or a negative error. */
ret = __syscall(SYS_futex, &c->tok, FUTEX_REQUEUE|THRD_PRIVATE,
0,
INT_MAX, &mtx->_mt_lck, c->tok);
/* Account the requeued threads as waiters on the mutex, and wake
one of them if the caller is not going to do so via unlock. */
if (ret > wakeup) a_fetch_add(&mtx->_mt_wts, ret-wakeup);
if (ret > 0 && wakeup) ret = __syscall(SYS_futex, &mtx->_mt_lck, FUTEX_WAKE|THRD_PRIVATE, 1);
}
return ret < 0 ? thrd_error : thrd_success;
}
/* C11 cnd_broadcast: wake every thread currently waiting on cond.
 *
 * Detaches the internal condition object under the spin lock, then
 * broadcasts on it outside the lock.  Returns thrd_success, or
 * thrd_error propagated from the futex operations. */
int cnd_broadcast(cnd_t * cond)
{
	/* Swap out the internal object while holding the spin lock so no
	   new waiter can attach to it concurrently.  All current waiters
	   (if any) are blocked on this one object. */
	a_splck(&cond->_cx_lock);
	__cnd_t * inner = cond->_cx_cnd;
	cond->_cx_cnd = 0;
	a_spunl(&cond->_cx_lock);
	/* A null inner object means nobody has ever waited: trivially done. */
	if (!inner) return thrd_success;
	/* Wake everybody, then drop the reference: detaching the object
	   did not change the total number of references, so no extra
	   increment was needed above. */
	int status = __cnd_broadcast(inner);
	__cnd_unref(inner);
	return status;
}