#define _GNU_SOURCE
#include "pthread_impl.h"
#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>
#if SAFE_STACK
/* Set once __init_unsafe_stack has installed the real unsafe stack;
 * guards both init paths below against running twice. */
static bool unsafe_stack_ptr_inited = false;
/* base address of safe stack allocated most recently */
__attribute__((__visibility__("hidden")))
uintptr_t __stack_base;
/* Namespace-safe syscall wrappers defined elsewhere in libc. */
void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
int __mprotect(void *, size_t, int);
void __restrict_segments(void);
/* There are no checks for overflows past the end of this stack buffer. It must
 * be allocated with adequate space to meet the requirements of all of the code
 * that runs prior to __init_unsafe_stack allocating a new unsafe stack. This
 * buffer is not used after that. */
static unsigned char preinit_us[4096];
/* Replace the static preinit unsafe stack with a dedicated, guarded
 * mapping for the current (initial) thread. Runs at most once; later
 * calls are no-ops. Any failure is fatal (a_crash). */
__attribute__((__visibility__("hidden")))
void __init_unsafe_stack(void)
{
	size_t stack_size;
	pthread_attr_t attr;
	struct pthread *self;
	if (unsafe_stack_ptr_inited)
		return;
	self = __pthread_self();
	/* Set the unsafe stack pointer in the current TCB to the statically-allocated
	 * unsafe stack, since some of the subroutines invoked below may use the
	 * unsafe stack. */
	self->unsafe_stack_ptr = preinit_us + sizeof(preinit_us);
	if (pthread_getattr_np(self, &attr) != 0)
		a_crash();
	/* Stores the thread's stack base directly into __stack_base and its
	 * size into stack_size. */
	if (pthread_attr_getstack(&attr, (void **)&__stack_base, &stack_size) != 0)
		a_crash();
	/* Size the new unsafe stack at twice the thread's stack size. */
	stack_size *= 2;
	/* This mapping is not reclaimed until the process exits. */
	uint8_t *unsafe_stack = __mmap(0, stack_size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
	if (unsafe_stack == MAP_FAILED)
		a_crash();
	/* Leave the low DEFAULT_GUARD_SIZE bytes PROT_NONE as a guard region;
	 * enable read/write on the remainder. */
	unsafe_stack += DEFAULT_GUARD_SIZE;
	stack_size -= DEFAULT_GUARD_SIZE;
	/* ENOSYS is tolerated -- presumably for targets where mprotect is
	 * unavailable; TODO confirm. */
	if (__mprotect(unsafe_stack, stack_size, PROT_READ|PROT_WRITE)
	    && errno != ENOSYS)
		a_crash();
	/* Stacks grow down: start the unsafe stack pointer at the top. */
	self->unsafe_stack_ptr = unsafe_stack + stack_size;
	unsafe_stack_ptr_inited = true;
}
/* Minimal TCB used before real TLS setup: only the unsafe stack pointer
 * is meaningful, pointing at the top of the static preinit buffer. */
static struct pthread preinit_tcb = {
	.unsafe_stack_ptr = preinit_us + sizeof(preinit_us)
};
/* Install a minimal TCB whose only initialized field is the unsafe stack
 * pointer, so early code has an unsafe stack before full TLS setup. */
__attribute__((__visibility__("hidden")))
void __preinit_unsafe_stack(void)
{
	/* Once the real unsafe stack is in place, never reinstall the
	 * preinit TCB. */
	if (!unsafe_stack_ptr_inited)
		__set_thread_area(&preinit_tcb);
}
/* Round x up to a whole number of pages. */
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)
/* Arch-specific placement hint for the next safe stack mapping. */
uintptr_t __safestack_addr_hint(size_t size);
/* Allocate a guarded safe-stack mapping for a new thread and repoint the
 * thread's stack fields at it; the caller-provided stack region becomes
 * the thread's unsafe stack. Returns 0 on success, EAGAIN on failure. */
__attribute__((__visibility__("hidden")))
int __safestack_init_thread(struct pthread *restrict new, const pthread_attr_t *restrict attr)
{
	size_t size, guard;
	unsigned char *map = 0;
	/* The stack set up by pthread_create becomes the unsafe stack. */
	new->unsafe_stack_ptr = new->stack;
	/* Guard covers libc's default plus any caller-requested guard size. */
	guard = ROUND(DEFAULT_GUARD_SIZE + attr->_a_guardsize);
	size = ROUND(new->stack_size + guard);
	uintptr_t try_map = __safestack_addr_hint(size);
	/* Map everything PROT_NONE first, then enable access above the guard. */
	map = __mmap((void *)try_map, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
	if (map == MAP_FAILED)
		goto fail;
	/* ENOSYS is tolerated -- presumably for targets where mprotect is
	 * unavailable; TODO confirm. */
	if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
	    && errno != ENOSYS) {
		__munmap(map, size);
		goto fail;
	}
	/* Record the mapping so __safestack_pthread_exit can reclaim it. */
	new->safe_stack_base = map;
	new->safe_stack_size = size;
	/* Stacks grow down: the thread starts at the top of the mapping. */
	new->stack = map + size;
	__stack_base = (uintptr_t)map;
	/* NOTE(review): presumably adjusts segment limits to account for the
	 * new mapping -- confirm against __restrict_segments' definition. */
	__restrict_segments();
	return 0;
fail:
	return EAGAIN;
}
/* Thread-exit hook: reclaim the safe-stack mapping of a detached thread.
 * Joinable threads keep theirs -- presumably reclaimed on join; confirm
 * against the pthread_join path. */
void __safestack_pthread_exit(struct pthread *self)
{
	if (!self->detached)
		return;
	if (!self->safe_stack_base)
		return;
	__munmap(self->safe_stack_base, self->safe_stack_size);
}
#else /*SAFE_STACK*/
/* Fallback no-op implementations used when SAFE_STACK is disabled, so
 * generic pthread code can call the safe-stack hooks unconditionally.
 * dummy1/dummy2 are made static to match dummy above: they are purely
 * alias targets and must not leak external symbols into the namespace. */
static void dummy(void) {}
weak_alias(dummy, __preinit_unsafe_stack);
weak_alias(dummy, __init_unsafe_stack);
/* Matches __safestack_init_thread's signature; reports success without
 * allocating anything. */
static int dummy1(struct pthread *restrict thr, const pthread_attr_t *restrict attr)
{
	return 0;
}
weak_alias(dummy1, __safestack_init_thread);
/* Matches __safestack_pthread_exit's signature; nothing to unmap. */
static void dummy2(struct pthread *self) {}
weak_alias(dummy2, __safestack_pthread_exit);
#endif /*SAFE_STACK*/