mailing list of musl libc
Subject: First draft of new synccall, seems to be working
From: Rich Felker
Date: 2015-01-15  4:33 UTC
To: musl

[-- Attachment #1: Type: text/plain, Size: 820 bytes --]

I just wrote the first draft of the new __synccall, used for
multithreaded set*id(). All the concepts seem sound and seem to be
working. It's missing the code to block creation of an unbounded
number of new threads while __synccall is running, but otherwise
everything's in
place. Both the /proc/self/task processing logic and the PI futex
logic seem to be working as expected. I haven't yet tested in the
presence of multiple realtime priorities where there's a risk of some
threads never getting a chance to run; I need a framework with a
high-RT-prio supervisor process to kill the test if it runs too long,
because I don't want to be rebooting boxes over and over to test. :)
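
A minimal sketch of such a supervisor (hypothetical; not part of the
attached draft) might run itself at the top SCHED_FIFO priority, fork
the test at a low RT priority, and SIGKILL it on timeout:

/* Hypothetical supervisor sketch (assumes root or CAP_SYS_NICE for
 * SCHED_FIFO, and a fixed 10s timeout). Because the supervisor runs
 * at a higher RT priority than the test, it can always preempt a
 * livelocked test and kill it instead of wedging the box. */
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct sched_param sp = { .sched_priority = 99 };
	pid_t pid;

	if (argc < 2) {
		fprintf(stderr, "usage: %s test-cmd [args]\n", argv[0]);
		return 1;
	}
	if (sched_setscheduler(0, SCHED_FIFO, &sp) < 0) {
		perror("sched_setscheduler");
		return 1;
	}
	if (!(pid = fork())) {
		/* Run the test at a low RT priority so the supervisor
		 * always preempts it when the timeout expires. */
		sp.sched_priority = 1;
		sched_setscheduler(0, SCHED_FIFO, &sp);
		execvp(argv[1], argv+1);
		_exit(127);
	}
	sleep(10);
	if (waitpid(pid, 0, WNOHANG) <= 0) {
		fprintf(stderr, "test timed out; killing\n");
		kill(pid, SIGKILL);
		waitpid(pid, 0, 0);
		return 1;
	}
	return 0;
}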

Attached is the current draft of the code, as a source file rather
than a patch since the whole file is basically rewritten. Comments
welcome.

Rich

[-- Attachment #2: synccall.c --]
[-- Type: text/plain, Size: 3767 bytes --]

#include "pthread_impl.h"
#include <semaphore.h>
#include <unistd.h>
#include <dirent.h>
#include <string.h>
#include <ctype.h>
#include "futex.h"
#include <../dirent/__dirent.h>

static struct chain {
	struct chain *next;
	int tid;
	sem_t start_sem, next_sem, finish_sem;
} *head;

static int synccall_lock[2];
static int data_lock[2];
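/* cur_tid records which tid is currently being signaled and doubles as
 * a PI futex: the caller locks it to lend its priority to the target
 * thread, and the handler releases the caller once it has checked in. */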
static int cur_tid;
static void (*callback)(void *), *context;

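/* SIGSYNCCALL handler: add the current thread to the chain, release
 * the caller if it is blocked on cur_tid lending us priority, then
 * run the callback when signaled and wait to be released. */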
static void handler(int sig)
{
	struct chain ch;
	int old_errno = errno;

	sem_init(&ch.start_sem, 0, 0);
	sem_init(&ch.next_sem, 0, 0);
	sem_init(&ch.finish_sem, 0, 0);

	LOCK(data_lock);
	ch.tid = __syscall(SYS_gettid);
	ch.next = head;
	head = &ch;
	UNLOCK(data_lock);
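	/* Tell the caller this thread has checked in by clearing cur_tid.
	 * If the cas fails because the caller already set the waiters bit
	 * (0x80000000) and is blocked in FUTEX_LOCK_PI lending us its
	 * priority, release it with FUTEX_UNLOCK_PI. */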
	if (a_cas(&cur_tid, ch.tid, 0) == (ch.tid | 0x80000000))
		__syscall(SYS_futex, &cur_tid, FUTEX_UNLOCK_PI|FUTEX_PRIVATE);

	sem_wait(&ch.start_sem);
	callback(context);
	sem_post(&ch.next_sem);
	sem_wait(&ch.finish_sem);

	errno = old_errno;
}
static int is_member(struct chain *p, int tid)
{
	while (p) {
		if (p->tid == tid) return 1;
		p = p->next;
	}
	return 0;
}

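/* __synccall serializes a call to func in every thread of the process:
 * it signals each tid listed in /proc/self/task, lends the target
 * thread priority via a PI futex on cur_tid so it can run even when
 * realtime priorities would otherwise starve it, and rescans the list
 * until every thread has added itself to the chain. */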
void __synccall(void (*func)(void *), void *ctx)
{
	sigset_t oldmask;
	int cs;
	int pid, self;
	int i;
	DIR dir = {0};
	struct dirent *de;
	struct sigaction sa = { .sa_flags = 0, .sa_handler = handler };
	struct chain *cp, *next;
	struct timespec ts;

	__block_all_sigs(&oldmask);
	LOCK(synccall_lock);
	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);

	LOCK(data_lock);
	head = 0;
	callback = func;
	context = ctx;
	UNLOCK(data_lock);

	if (!libc.threaded) goto single_threaded;

	/* Block even impl-internal signals */
	memset(&sa.sa_mask, -1, sizeof sa.sa_mask);
	__libc_sigaction(SIGSYNCCALL, &sa, 0);

	pid = __syscall(SYS_getpid);
	self = __syscall(SYS_gettid);

	/* Since opendir is not AS-safe, the DIR needs to be set up manually
	 * in automatic storage. Thankfully this is easy. */
	dir.fd = open("/proc/self/task", O_RDONLY|O_DIRECTORY|O_CLOEXEC);
	if (dir.fd < 0) goto out;

	/* Initially send one signal per counted thread. But since we can't
	 * synchronize with thread creation/exit here, there could be too
	 * few signals. This initial signaling is just an optimization, not
	 * part of the logic. */
	for (i=libc.threads_minus_1; i; i--)
		__syscall(SYS_kill, pid, SIGSYNCCALL);

	/* Loop scanning the kernel-provided thread list until it shows no
	 * threads that have not already replied to the signal. */
	for (;;) {
		int miss_cnt = 0;
		while ((de = readdir(&dir))) {
			if (!isdigit(de->d_name[0])) continue;
			int tid = atoi(de->d_name);
			if (tid == self) continue;
			LOCK(data_lock);
			if (is_member(head, tid)) {
				UNLOCK(data_lock);
				continue;
			}
			UNLOCK(data_lock);

			__syscall(SYS_tgkill, pid, tid, SIGSYNCCALL);

			/* The FUTEX_LOCK_PI operation is used to loan priority
			 * to the target thread, which otherwise may be unable
			 * to run. Timeout is necessary because there is a race
			 * condition where the tid may be reused by a different
			 * process. */
			clock_gettime(CLOCK_REALTIME, &ts);
			ts.tv_nsec += 10000000;
			if (ts.tv_nsec >= 1000000000) {
				ts.tv_sec++;
				ts.tv_nsec -= 1000000000;
			}
			a_store(&cur_tid, tid);
			__syscall(SYS_futex, &cur_tid, FUTEX_LOCK_PI|FUTEX_PRIVATE, 0, &ts);

			LOCK(data_lock);
			if (cur_tid != self && !is_member(head, tid))
				miss_cnt++;
			UNLOCK(data_lock);
		}
		if (!miss_cnt) break;
		rewinddir(&dir);
	}
	close(dir.fd);

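	/* All threads have checked in; run the callback in each of them,
	 * one at a time, waiting for each to finish before starting the
	 * next. */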
	for (cp=head; cp; cp=next) {
		next = cp->next;
		sem_post(&cp->start_sem);
		sem_wait(&cp->next_sem);
	}

	sa.sa_handler = SIG_IGN;
	__libc_sigaction(SIGSYNCCALL, &sa, 0);

single_threaded:
	func(ctx);

	for (cp=head; cp; cp=next) {
		next = cp->next;
		sem_post(&cp->finish_sem);
	}

out:
	pthread_setcancelstate(cs, 0);
	UNLOCK(synccall_lock);
	__restore_sigs(&oldmask);
}
