1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
#if SAFE_STACK
#define _GNU_SOURCE
#include "pthread_impl.h"
#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>
/* Set once the first real unsafe stack has been mapped; guards both
 * __init_unsafe_stack and __preinit_unsafe_stack against re-running. */
static bool unsafe_stack_ptr_inited = false;
/* minimum base address of all existing safe stacks */
__attribute__((__visibility__("hidden")))
uintptr_t __stack_base;
/* Internal libc syscall wrappers (defined elsewhere); used instead of the
 * public mmap/munmap/mprotect symbols. */
void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
int __mprotect(void *, size_t, int);
#if SEP_STACK_SEG
void __restrict_segments(void);
#endif
/* There are no checks for overflows past the end of this stack buffer. It must
 * be allocated with adequate space to meet the requirements of all of the code
 * that runs prior to __init_unsafe_stack allocating a new unsafe stack. This
 * buffer is not used after that. */
static uint8_t preinit_us[PAGE_SIZE];
/* Allocate the initial thread's real unsafe stack and record its top in the
 * TCB. Runs at most once; later calls return immediately. Compiled with
 * no_sanitize("safe-stack") because it must not itself spill to an unsafe
 * stack before one exists. Crashes the process on any failure. */
__attribute__((no_sanitize("safe-stack"), __visibility__("hidden")))
void __init_unsafe_stack(void)
{
void *stack_base;
size_t stack_size;
pthread_attr_t attr;
struct pthread *self;
if (unsafe_stack_ptr_inited)
return;
self = __pthread_self();
/* Set the unsafe stack pointer in the current TCB to the statically-allocated
 * unsafe stack, since some of the subroutines invoked below may use the
 * unsafe stack. */
self->unsafe_stack_ptr = preinit_us + sizeof(preinit_us);
if (pthread_getattr_np(self, &attr) != 0)
a_crash();
/* Store this thread's stack base directly into the uintptr_t global
 * __stack_base (hence the pointer cast); stack_size receives the
 * thread's stack size. */
if (pthread_attr_getstack(&attr, (void **)&__stack_base, &stack_size) != 0)
a_crash();
/* Size the unsafe-stack mapping at twice the thread's stack size. */
stack_size *= 2;
/* This mapping is not reclaimed until the process exits. */
uint8_t *unsafe_stack = __mmap(0, stack_size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
if (unsafe_stack == MAP_FAILED)
a_crash();
/* Leave the lowest DEFAULT_GUARD_SIZE bytes of the mapping PROT_NONE as a
 * guard region below the usable unsafe stack. */
unsafe_stack += DEFAULT_GUARD_SIZE;
stack_size -= DEFAULT_GUARD_SIZE;
/* ENOSYS is tolerated -- presumably for targets lacking mprotect where the
 * mapping is already usable; TODO confirm against the ports this targets. */
if (__mprotect(unsafe_stack, stack_size, PROT_READ|PROT_WRITE)
&& errno != ENOSYS)
a_crash();
/* The unsafe stack pointer starts at the top of the region (grows down). */
self->unsafe_stack_ptr = unsafe_stack + stack_size;
unsafe_stack_ptr_inited = true;
}
/* Minimal static TCB used before the real one exists: only the unsafe stack
 * pointer field is initialized, pointing at the top of the static pre-init
 * buffer. Installed by __preinit_unsafe_stack below. */
static struct pthread preinit_tcb = {
.unsafe_stack_ptr = preinit_us + sizeof(preinit_us)
};
/* Install the minimal pre-init TCB so that code instrumented for safe-stack
 * can run before the real unsafe stack is allocated. Becomes a no-op once
 * __init_unsafe_stack has completed. Must not itself use the unsafe stack,
 * hence no_sanitize("safe-stack"). */
__attribute__((no_sanitize("safe-stack"), __visibility__("hidden")))
void __preinit_unsafe_stack(void)
{
if (!unsafe_stack_ptr_inited)
__set_thread_area(&preinit_tcb);
}
/* Round x up to a whole number of pages. */
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)
/* Allocate a separate safe stack for a new thread being created. The stack
 * the caller already set up in new->stack becomes the thread's unsafe stack;
 * a fresh mapping (guard region at its low end) becomes the safe stack.
 * Returns 0 on success, EAGAIN if the mapping could not be established. */
__attribute__((__visibility__("hidden")))
int __safestack_init_thread(struct pthread *restrict new, const pthread_attr_t *restrict attr)
{
size_t size, guard;
unsigned char *map = 0;
/* Repurpose the caller-provided stack as the unsafe stack. */
new->unsafe_stack_ptr = new->stack;
/* Guard combines the implementation default with the user-requested
 * guard size; both guard and total size are page-rounded. */
guard = ROUND(DEFAULT_GUARD_SIZE + attr->_a_guardsize);
size = ROUND(new->stack_size + guard);
uintptr_t try_map = 0;
#if SEP_STACK_SEG
/* Try to allocate the new safe stack just below the lowest existing safe
 * stack to help avoid a data segment limit that is too low and causes
 * faults when accessing non-stack data above the limit. */
try_map = __stack_base - size;
#endif
/* try_map is only a placement hint; the kernel may map elsewhere. */
map = __mmap((void *)try_map, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
if (map == MAP_FAILED)
goto fail;
/* Make everything above the guard region accessible; the low `guard`
 * bytes stay PROT_NONE. ENOSYS is tolerated, as in __init_unsafe_stack. */
if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
&& errno != ENOSYS) {
__munmap(map, size);
goto fail;
}
new->safe_stack_base = map;
new->safe_stack_size = size;
/* The safe stack starts at the top of the mapping (grows down). */
new->stack = map + size;
#if SEP_STACK_SEG
__restrict_segments();
#endif
return 0;
fail:
return EAGAIN;
}
#endif /*SAFE_STACK*/