#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include <sys/mman.h>
#include <threads.h>
/* No-op fallback for the TSD/TSS destructor hooks.  The real
 * implementations (supplied where pthread_key_create / C11 tss
 * support is linked in) override these weak aliases; if the
 * application never uses keys, the exit path calls this stub. */
static void dummy_0()
{
}
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __tss_run_dtors);
/* Terminate the calling thread: run cancellation cleanup handlers and
 * TSD/TSS destructors, then tear down the thread without returning.
 * Also serves as the public pthread_exit via the weak alias below. */
_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;
	/* Publish the exit value where a joiner can read it. */
	self->result = result;
	/* Run pending cancellation cleanup handlers in LIFO order,
	 * unlinking each entry before invoking it. */
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}
	/* Destructors for POSIX thread-specific data and C11 TSS;
	 * these are no-op weak aliases unless key support is linked. */
	__pthread_tsd_run_dtors();
	__tss_run_dtors();
	/* NOTE(review): exitlock presumably serializes against
	 * pthread_join/pthread_detach — confirm in their sources. */
	__lock(self->exitlock);
	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;
	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);
	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);
	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		/* Last thread: full process exit so atexit handlers and
		 * stdio flushing run. */
		exit(0);
	}
	/* Drop this thread's contribution to the per-locale counters
	 * if it was using a non-global locale. */
	if (self->locale != &libc.global_locale) {
		a_dec(&libc.uselocale_cnt);
		if (self->locale->ctype_utf8)
			a_dec(&libc.bytelocale_cnt_minus_1);
	}
	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);
		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}
	/* Non-detached (or no mapping): exit the kernel thread.  The
	 * loop guards against a spurious return from the syscall. */
	for (;;) __syscall(SYS_exit, 0);
}
weak_alias(__pthread_exit, pthread_exit);
/* Link cb at the head of the calling thread's cancellation-cleanup
 * list.  Backend for pthread_cleanup_push; a no-op before the thread
 * pointer is available. */
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *td;
	if (!libc.has_thread_pointer) return;
	td = __pthread_self();
	cb->__next = td->cancelbuf;
	td->cancelbuf = cb;
}
/* Unlink the most recently pushed cleanup record from the calling
 * thread's list.  Backend for pthread_cleanup_pop; a no-op before the
 * thread pointer is available. */
void __do_cleanup_pop(struct __ptcb *cb)
{
	if (libc.has_thread_pointer)
		__pthread_self()->cancelbuf = cb->__next;
}
/* pthread_key_create.c overrides this */
/* Fallback definitions used when the corresponding real objects are
 * not linked in; each weak alias below is replaced by a strong
 * definition elsewhere when the feature is actually used. */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);
/* Null stand-ins for the stdio stream pointers; presumably overridden
 * by the stdio implementation when stdio is linked — these let
 * __thread_enable below run without pulling in stdio. */
static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);
/* Make a stdio stream's lock usable: a negative lock value marks the
 * single-threaded fast path, so reset it to 0 (unlocked) when the
 * process goes multithreaded. */
static void init_file_lock(FILE *f)
{
	if (!f) return;
	if (f->lock < 0) f->lock = 0;
}
/* Transition the process into multithreaded mode: enable locking on
 * every open stdio stream, unblock the internal signal set, attach the
 * main thread's TSD array, and finally set the global threaded flag.
 * The statement order is significant and preserved. */
void __thread_enable(void)
{
	FILE *f = libc.ofl_head;
	while (f) {
		init_file_lock(f);
		f = f->next;
	}
	init_file_lock(__stdin_used);
	init_file_lock(__stdout_used);
	init_file_lock(__stderr_used);
	__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
	__pthread_self()->tsd = (void **)__pthread_tsd_main;
	libc.threaded = 1;
}