author     normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2014-05-10 23:48:51 +0000
committer  normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2014-05-10 23:48:51 +0000
commit     f11db2a605d99ef6a0943eba34db355188f8efcb (patch)
tree       277ff70e7cc260d26f3028d89f25d5b4603675b4 /thread.c
parent     3771a370ad64aae87f751751e80d52d02a1735a9 (diff)
vm*: doubly-linked list from ccan to manage vm->living_threads

A doubly-linked list for tracking living threads guarantees
constant-time insert/delete performance with no corner cases of a
hash table.

I chose this ccan implementation of doubly-linked lists over the
BSD sys/queue.h implementation since:

1) insertion and removal are both branchless
2) locality is improved if a struct may be a member of multiple lists
   (the 0002 patch in Feature #9632 will introduce a secondary list
   for waiting FDs)

This also increases cache locality during iteration, improving
performance in a new IO#close benchmark with many sleeping threads
while still scanning the same number of threads.

    vm_thread_close 1.762

* vm_core.h (rb_vm_t): list_head and counter for living_threads
  (rb_thread_t): vmlt_node for living_threads linkage
  (rb_vm_living_threads_init): new function wrapper
  (rb_vm_living_threads_insert): ditto
  (rb_vm_living_threads_remove): ditto
* vm.c (rb_vm_living_threads_foreach): new function wrapper
* thread.c (terminate_i, thread_start_func_2, thread_create_core,
  thread_fd_close_i, thread_fd_close): update to use new APIs
* vm.c (vm_mark_each_thread_func, rb_vm_mark, ruby_vm_destruct,
  vm_memsize, vm_init2, Init_VM): ditto
* vm_trace.c (clear_trace_func_i, rb_clear_trace_func): ditto
* benchmark/bm_vm_thread_close.rb: added to show improvement
* ccan/build_assert/build_assert.h: added as a dependency of list.h
* ccan/check_type/check_type.h: ditto
* ccan/container_of/container_of.h: ditto
* ccan/licenses/BSD-MIT: ditto
* ccan/licenses/CC0: ditto
* ccan/str/str.h: ditto (stripped of unused macros)
* ccan/list/list.h: ditto
* common.mk: add CCAN_LIST_INCLUDES

[ruby-core:61871] [Feature #9632 (part 1)]

Apologies for the size of this commit, but I think a good
doubly-linked list will be useful for future features, too.

This may be used to add ordering to a container_of-based hash table
to preserve compatibility if required (e.g. Feature #9614).

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@45913 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
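
For context, the idea behind ccan/list is an intrusive, circular doubly-linked
list with a sentinel head: the list is never structurally empty, so insert and
delete each touch a fixed set of pointers with no branching, and the link node
is embedded in the owning struct, so one object can sit on several lists at
once and removal needs no table lookup. A minimal, self-contained sketch of
that idea (illustrative only, not the ccan code; all names below are made up):

    /* sketch of a sentinel-based circular doubly-linked list with embedded
     * nodes -- the same shape ccan/list uses, but not the ccan code itself */
    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *prev, *next; };
    struct head { struct node n; };     /* sentinel node doubles as the head */

    static void head_init(struct head *h) { h->n.prev = h->n.next = &h->n; }

    /* branchless insert: the sentinel makes the empty list a non-special case */
    static void node_insert_tail(struct head *h, struct node *n)
    {
        n->prev = h->n.prev;
        n->next = &h->n;
        h->n.prev->next = n;
        h->n.prev = n;
    }

    /* branchless remove: both neighbours always exist (possibly the sentinel) */
    static void node_remove(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* stand-in for rb_thread_t with an embedded linkage node */
    struct fake_thread { int id; struct node vmlt_node; };

    int main(void)
    {
        struct head living;
        struct fake_thread a = { 1 }, b = { 2 }, c = { 3 };
        struct node *it;

        head_init(&living);
        node_insert_tail(&living, &a.vmlt_node);
        node_insert_tail(&living, &b.vmlt_node);
        node_insert_tail(&living, &c.vmlt_node);
        node_remove(&b.vmlt_node);      /* O(1): no lookup, no branches */

        for (it = living.n.next; it != &living.n; it = it->next) {
            struct fake_thread *t = container_of(it, struct fake_thread, vmlt_node);
            printf("living thread %d\n", t->id);    /* prints 1, then 3 */
        }
        return 0;
    }

This is why rb_vm_living_threads_remove() in the diff below only needs the
rb_thread_t pointer, where the st_delete_wrap() call it replaces had to hash
and look up th->self.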
Diffstat (limited to 'thread.c')
-rw-r--r--  thread.c  86
1 file changed, 30 insertions(+), 56 deletions(-)
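
Every hunk below follows the same pattern: an st_foreach() callback of type
int (st_data_t, st_data_t, st_data_t) that had to recover the rb_thread_t via
GetThreadPtr() becomes a callback of type int (rb_thread_t *, void *), and the
st_foreach() call becomes rb_vm_living_threads_foreach(). As a rough sketch of
how those wrappers could be built on ccan/list (the vm->living_threads,
vm->living_thread_num and th->vmlt_node names come from the commit message and
diff; the bodies below are an assumption, not the actual vm_core.h/vm.c code
from this commit):

    #include "ccan/list/list.h"    /* added by this commit */

    /* hypothetical sketch -- field names from the diff, bodies guessed */
    static inline void
    rb_vm_living_threads_init(rb_vm_t *vm)
    {
        list_head_init(&vm->living_threads);
        vm->living_thread_num = 0;
    }

    static inline void
    rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
    {
        list_add_tail(&vm->living_threads, &th->vmlt_node);
        vm->living_thread_num++;
    }

    static inline void
    rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
    {
        list_del(&th->vmlt_node);
        vm->living_thread_num--;
    }

    /* callbacks have the shape used throughout the diff:
     * int (*fn)(rb_thread_t *, void *), returning ST_CONTINUE to keep going */
    void
    rb_vm_living_threads_foreach(rb_vm_t *vm,
                                 int (*fn)(rb_thread_t *, void *), void *arg)
    {
        rb_thread_t *th = 0, *next_th;

        /* the _safe variant allows a callback to unlink the current thread */
        list_for_each_safe(&vm->living_threads, th, next_th, vmlt_node) {
            if (fn(th, arg) != ST_CONTINUE) break;
        }
    }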
diff --git a/thread.c b/thread.c
index a011de1991..0cc64d9556 100644
--- a/thread.c
+++ b/thread.c
@@ -368,12 +368,8 @@ rb_threadptr_trap_interrupt(rb_thread_t *th)
}
static int
-terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
+terminate_i(rb_thread_t *th, void *main_thread)
{
- VALUE thval = key;
- rb_thread_t *th;
- GetThreadPtr(thval, th);
-
if (th != main_thread) {
thread_debug("terminate_i: %p\n", (void *)th);
rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
@@ -433,7 +429,7 @@ rb_thread_terminate_all(void)
retry:
thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
- st_foreach(vm->living_threads, terminate_i, (st_data_t)th);
+ rb_vm_living_threads_foreach(vm, terminate_i, th);
while (!rb_thread_alone()) {
int state;
@@ -585,7 +581,7 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_s
}
/* delete self other than main thread from living_threads */
- st_delete_wrap(th->vm->living_threads, th->self);
+ rb_vm_living_threads_remove(th->vm, th);
if (rb_thread_alone()) {
/* I'm last thread. wake up main thread from rb_thread_terminate_all */
rb_threadptr_interrupt(main_th);
@@ -657,7 +653,7 @@ thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
th->status = THREAD_KILLED;
rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
}
- st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
+ rb_vm_living_threads_insert(th->vm, th);
return thval;
}
@@ -2067,13 +2063,10 @@ rb_threadptr_reset_raised(rb_thread_t *th)
}
static int
-thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
+thread_fd_close_i(rb_thread_t *th, void *fdp)
{
- int fd = (int)data;
- rb_thread_t *th;
- GetThreadPtr((VALUE)key, th);
-
- if (th->waiting_fd == fd) {
+ int *fd = fdp;
+ if (th->waiting_fd == *fd) {
VALUE err = th->vm->special_exceptions[ruby_error_closed_stream];
rb_threadptr_pending_interrupt_enque(th, err);
rb_threadptr_interrupt(th);
@@ -2084,7 +2077,7 @@ thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
void
rb_thread_fd_close(int fd)
{
- st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
+ rb_vm_living_threads_foreach(GET_THREAD()->vm, thread_fd_close_i, &fd);
}
/*
@@ -2304,11 +2297,9 @@ rb_thread_stop(void)
}
static int
-thread_list_i(st_data_t key, st_data_t val, void *data)
+thread_list_i(rb_thread_t *th, void *data)
{
VALUE ary = (VALUE)data;
- rb_thread_t *th;
- GetThreadPtr((VALUE)key, th);
switch (th->status) {
case THREAD_RUNNABLE:
@@ -2347,7 +2338,7 @@ VALUE
rb_thread_list(void)
{
VALUE ary = rb_ary_new();
- st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
+ rb_vm_living_threads_foreach(GET_THREAD()->vm, thread_list_i, (void *)ary);
return ary;
}
@@ -2925,14 +2916,14 @@ thread_keys_i(ID key, VALUE value, VALUE ary)
static int
vm_living_thread_num(rb_vm_t *vm)
{
- return (int)vm->living_threads->num_entries;
+ return (int)vm->living_thread_num;
}
int
rb_thread_alone(void)
{
int num = 1;
- if (GET_THREAD()->vm->living_threads) {
+ if (!list_empty(&GET_THREAD()->vm->living_threads)) {
num = vm_living_thread_num(GET_THREAD()->vm);
thread_debug("rb_thread_alone: %d\n", num);
}
@@ -3767,28 +3758,23 @@ clear_coverage(void)
}
static void
-rb_thread_atfork_internal(int (*atfork)(st_data_t, st_data_t, st_data_t))
+rb_thread_atfork_internal(int (*atfork)(rb_thread_t *, void *))
{
rb_thread_t *th = GET_THREAD();
rb_vm_t *vm = th->vm;
- VALUE thval = th->self;
vm->main_thread = th;
gvl_atfork(th->vm);
- st_foreach(vm->living_threads, atfork, (st_data_t)th);
- st_clear(vm->living_threads);
- st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
+ rb_vm_living_threads_foreach(vm, atfork, th);
+ rb_vm_living_threads_init(vm);
+ rb_vm_living_threads_insert(vm, th);
vm->sleeper = 0;
clear_coverage();
}
static int
-terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
+terminate_atfork_i(rb_thread_t *th, void *current_th)
{
- VALUE thval = key;
- rb_thread_t *th;
- GetThreadPtr(thval, th);
-
if (th != (rb_thread_t *)current_th) {
rb_mutex_abandon_keeping_mutexes(th);
rb_mutex_abandon_locking_mutex(th);
@@ -3808,12 +3794,8 @@ rb_thread_atfork(void)
}
static int
-terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
+terminate_atfork_before_exec_i(rb_thread_t *th, void *current_th)
{
- VALUE thval = key;
- rb_thread_t *th;
- GetThreadPtr(thval, th);
-
if (th != (rb_thread_t *)current_th) {
thread_cleanup_func_before_exec(th);
}
@@ -3881,13 +3863,12 @@ struct thgroup_list_params {
};
static int
-thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
+thgroup_list_i(rb_thread_t *th, void *arg)
{
- VALUE thread = (VALUE)key;
- VALUE ary = ((struct thgroup_list_params *)data)->ary;
- VALUE group = ((struct thgroup_list_params *)data)->group;
- rb_thread_t *th;
- GetThreadPtr(thread, th);
+ struct thgroup_list_params *params = arg;
+ VALUE thread = th->self;
+ VALUE ary = params->ary;
+ VALUE group = params->group;
if (th->thgroup == group) {
rb_ary_push(ary, thread);
@@ -3912,7 +3893,7 @@ thgroup_list(VALUE group)
param.ary = ary;
param.group = group;
- st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
+ rb_vm_living_threads_foreach(GET_THREAD()->vm, thgroup_list_i, &param);
return ary;
}
@@ -5051,12 +5032,9 @@ ruby_native_thread_p(void)
}
static int
-check_deadlock_i(st_data_t key, st_data_t val, int *found)
+check_deadlock_i(rb_thread_t *th, void *arg)
{
- VALUE thval = key;
- rb_thread_t *th;
- GetThreadPtr(thval, th);
-
+ int *found = arg;
if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th)) {
*found = 1;
}
@@ -5076,12 +5054,8 @@ check_deadlock_i(st_data_t key, st_data_t val, int *found)
#ifdef DEBUG_DEADLOCK_CHECK
static int
-debug_i(st_data_t key, st_data_t val, int *found)
+debug_i(rb_thread_t *th, int *found)
{
- VALUE thval = key;
- rb_thread_t *th;
- GetThreadPtr(thval, th);
-
printf("th:%p %d %d", th, th->status, th->interrupt_flag);
if (th->locking_mutex) {
rb_mutex_t *mutex;
@@ -5107,15 +5081,15 @@ rb_check_deadlock(rb_vm_t *vm)
if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
if (patrol_thread && patrol_thread != GET_THREAD()) return;
- st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);
+ rb_vm_living_threads_foreach(vm, check_deadlock_i, &found);
if (!found) {
VALUE argv[2];
argv[0] = rb_eFatal;
argv[1] = rb_str_new2("No live threads left. Deadlock?");
#ifdef DEBUG_DEADLOCK_CHECK
- printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
- st_foreach(vm->living_threads, debug_i, (st_data_t)0);
+ printf("%d %d %p %p\n", vm_living_thread_num(vm), vm->sleeper, GET_THREAD(), vm->main_thread);
+ rb_vm_living_threads_foreach(vm, debug_i, 0);
#endif
vm->sleeper--;
rb_threadptr_raise(vm->main_thread, 2, argv);