author     normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2017-05-08 00:18:53 +0000
committer  normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2017-05-08 00:18:53 +0000
commit     b8ad658ce30d6e6edf2dea0fab5aeb330343a9ac (patch)
tree       e4d4acc41809b7f15893071259909712109632d4 /thread.c
parent     decb336b1b70636efcce8473becb5475668994d5 (diff)
download   ruby-b8ad658ce30d6e6edf2dea0fab5aeb330343a9ac.tar.gz
reduce rb_mutex_t size from 160 to 80 bytes on 64-bit
Instead of relying on a native condition variable and mutex for
every Ruby Mutex object, use a doubly-linked list to implement a
waiter queue in the Mutex.  The immediate benefit of this is
reducing the size of every Mutex object, as some projects have
many objects requiring synchronization.

In the future, this technique using a linked list and an on-stack
list node (struct mutex_waiter) should allow us to easily
transition to an M:N threading model, as we can avoid the native
thread dependency to implement Mutex.

We already do something similar for autoload in variable.c, and
this was inspired by the Linux kernel wait queue (as ccan/list is
inspired by the Linux kernel linked list).

Finally, there are big performance improvements for Mutex
benchmarks, especially in contended cases:

measure target: real

name            |trunk  |built
----------------|------:|------:
loop_whileloop2 |  0.149|  0.148
vm2_mutex*      |  0.893|  0.651
vm_thread_mutex1|  0.809|  0.624
vm_thread_mutex2|  2.608|  0.628
vm_thread_mutex3| 28.227|  0.881

Speedup ratio: compare with the result of `trunk' (greater is better)

name            |built
----------------|------:
loop_whileloop2 |  1.002
vm2_mutex*      |  1.372
vm_thread_mutex1|  1.297
vm_thread_mutex2|  4.149
vm_thread_mutex3| 32.044

Tested on AMD FX-8320 8-core at 3.5GHz

* thread_sync.c (struct mutex_waiter): new on-stack struct
  (struct rb_mutex_struct): remove native lock/cond, use ccan/list
  (rb_mutex_num_waiting): new function for debug_deadlock_check
  (mutex_free): remove native_*_destroy
  (mutex_alloc): initialize waitq, remove native_*_initialize
  (rb_mutex_trylock): remove native_mutex_{lock,unlock}
  (lock_func): remove
  (lock_interrupt): remove
  (rb_mutex_lock): rewrite waiting path to use native_sleep + ccan/list
  (rb_mutex_unlock_th): rewrite to wake up from native_sleep
    using rb_threadptr_interrupt
  (rb_mutex_abandon_all): empty waitq
* thread.c (debug_deadlock_check): update for new struct
  (rb_check_deadlock): ditto
  [ruby-core:80913] [Feature #13517]

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@58604 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
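As an illustration of the layout the message describes (a sketch only, not a
verbatim copy of thread_sync.c, assuming ccan/list's intrusive list_head and
list_node types): the Mutex holds the list head, and each waiting thread
embeds a node in a struct mutex_waiter that lives on its own stack for the
duration of the wait.

    /* Sketch of the waiter-queue layout described above; illustrative only. */
    #include "ccan/list/list.h"

    struct mutex_waiter {
        struct rb_thread_struct *th;  /* the blocked thread */
        struct list_node node;        /* links this waiter into the Mutex waitq */
    };

    typedef struct rb_mutex_struct {
        struct rb_thread_struct *th;  /* current owner, NULL when unlocked */
        struct list_head waitq;       /* on-stack mutex_waiter nodes, GVL-protected */
    } rb_mutex_t;

Because each node is embedded in the waiting thread's stack frame, a contended
lock needs no heap allocation and no per-Mutex native mutex/condvar; the GVL
already serializes updates to waitq, which is what allows the struct to shrink
from 160 to 80 bytes on 64-bit.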
Diffstat (limited to 'thread.c')
-rw-r--r--  thread.c  14
1 file changed, 3 insertions, 11 deletions
diff --git a/thread.c b/thread.c
index a622f4bf4f..0c843adabb 100644
--- a/thread.c
+++ b/thread.c
@@ -4940,15 +4940,9 @@ debug_deadlock_check(rb_vm_t *vm, VALUE msg)
                     th->self, th, thread_id_str(th), th->interrupt_flag);
         if (th->locking_mutex) {
             rb_mutex_t *mutex;
-            struct rb_thread_struct volatile *mth;
-            int waiting;
             GetMutexPtr(th->locking_mutex, mutex);
-
-            native_mutex_lock(&mutex->lock);
-            mth = mutex->th;
-            waiting = mutex->cond_waiting;
-            native_mutex_unlock(&mutex->lock);
-            rb_str_catf(msg, " mutex:%p cond:%d", mth, waiting);
+            rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
+                        mutex->th, rb_mutex_num_waiting(mutex));
         }
         {
             rb_thread_list_t *list = th->join_list;
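The rewritten debug output above calls rb_mutex_num_waiting(), the helper the
ChangeLog introduces in thread_sync.c.  Its body is not shown on this page,
but a plausible sketch, assuming ccan/list's list_for_each macro, is simply a
GVL-protected walk over waitq:

    /* Illustrative sketch: count the waiters currently parked on the Mutex. */
    static size_t
    rb_mutex_num_waiting(rb_mutex_t *mutex)
    {
        struct mutex_waiter *w = NULL;
        size_t n = 0;

        list_for_each(&mutex->waitq, w, node) {
            n++;
        }
        return n;
    }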
@@ -4981,11 +4975,9 @@ rb_check_deadlock(rb_vm_t *vm)
             rb_mutex_t *mutex;
             GetMutexPtr(th->locking_mutex, mutex);
 
-            native_mutex_lock(&mutex->lock);
-            if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
+            if (mutex->th == th || (!mutex->th && !list_empty(&mutex->waitq))) {
                 found = 1;
             }
-            native_mutex_unlock(&mutex->lock);
         }
         if (found)
             break;
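For completeness, here is a simplified picture of the locking and wakeup paths
the ChangeLog describes for rb_mutex_lock and rb_mutex_unlock_th.  The
*_sketch function names are hypothetical; native_sleep() and
rb_threadptr_interrupt() are the existing internal primitives the commit
message names, and the rest is an assumption made for illustration, not the
actual thread_sync.c code.

    /*
     * Simplified sketch: a contended lock parks an on-stack waiter on
     * mutex->waitq and calls native_sleep(); unlock wakes the first
     * waiter with rb_threadptr_interrupt(), and the woken thread
     * re-contends for the lock under the GVL.
     */
    static void
    mutex_lock_contended_sketch(rb_mutex_t *mutex, rb_thread_t *th)
    {
        struct mutex_waiter w;            /* lives on this thread's stack */

        w.th = th;
        while (mutex->th) {               /* still owned by another thread */
            list_add_tail(&mutex->waitq, &w.node);
            native_sleep(th, NULL);       /* sleep until interrupted */
            list_del(&w.node);            /* back under the GVL: unlink, retry */
        }
        mutex->th = th;                   /* acquired */
    }

    static void
    mutex_unlock_wakeup_sketch(rb_mutex_t *mutex)
    {
        struct mutex_waiter *w;

        mutex->th = NULL;                 /* release ownership */
        w = list_top(&mutex->waitq, struct mutex_waiter, node);
        if (w) {
            rb_threadptr_interrupt(w->th); /* kick one sleeper awake */
        }
    }

The key point is the lifetime of w: it sits on the queue only while the
waiting thread's stack frame is live, so nothing is allocated or freed and no
native lock/cond is needed per Mutex.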