 ChangeLog |  8 ++++++++
 thread.c  | 60 +++++++++++++++++++++++++++++++++++++++++++++-----------------
 2 files changed, 51 insertions(+), 17 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index df3bd4f5b4..174075af0a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+Fri Apr 29 10:07:13 2011 KOSAKI Motohiro <kosaki.motohiro@gmail.com>
+
+ * thread.c (rb_mutex_lock, lock_func): Avoid busy loop and
+ performance regression. bm_vm3_thread_mutex.rb performance
+ improved from 109.064sec to 16.331sec.
+
+ * thread.c (init_lock_timeout): New helper function.
+
Thu Apr 28 16:15:49 2011 NAKAMURA Usaku <usa@ruby-lang.org>
* win32/{win32.c,dir.h} (rb_w32_uopendir): new API to pass UTF-8 path.
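
[Editor's note: the ChangeLog entry boils down to replacing an immediate bail-out in lock_func (which made rb_mutex_lock spin) with a condition wait bounded by an absolute deadline. Below is a minimal sketch of that pattern using plain POSIX APIs; the patch itself goes through Ruby's native_cond_* wrappers, and deadline_after_ms / wait_with_timeout are illustrative names, not from the patch.]

#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <sys/time.h>

/* Compute an absolute CLOCK_REALTIME deadline timeout_ms from now,
 * mirroring what init_lock_timeout() in the patch does. */
static struct timespec
deadline_after_ms(int timeout_ms)
{
    struct timespec ts;
    struct timeval tv;

    gettimeofday(&tv, NULL);
    ts.tv_sec = tv.tv_sec;
    ts.tv_nsec = tv.tv_usec * 1000 + (long)timeout_ms * 1000 * 1000;
    while (ts.tv_nsec >= 1000000000L) { /* carry nanoseconds into seconds */
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }
    return ts;
}

/* Returns 0 when signaled, ETIMEDOUT when the deadline passes. */
static int
wait_with_timeout(pthread_cond_t *cond, pthread_mutex_t *lock, int timeout_ms)
{
    struct timespec ts = deadline_after_ms(timeout_ms);
    return pthread_cond_timedwait(cond, lock, &ts);
}
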
diff --git a/thread.c b/thread.c
index f787bf1499..ae4e669d15 100644
--- a/thread.c
+++ b/thread.c
@@ -3208,24 +3208,49 @@ rb_mutex_trylock(VALUE self)
return locked;
}
+static struct timespec init_lock_timeout(int timeout_ms)
+{
+ struct timespec ts;
+ struct timeval tv;
+ int ret;
+
+ ret = gettimeofday(&tv, NULL);
+ if (ret < 0)
+ rb_sys_fail(0);
+
+ ts.tv_sec = tv.tv_sec;
+ ts.tv_nsec = tv.tv_usec * 1000 + timeout_ms * 1000 * 1000;
+ if (ts.tv_nsec >= 1000000000) {
+ ts.tv_sec++;
+ ts.tv_nsec -= 1000000000;
+ }
+
+ return ts;
+}
+
static int
-lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
+lock_func(rb_thread_t *th, mutex_t *mutex, int timeout_ms)
{
int interrupted = 0;
-#if 0 /* for debug */
- native_thread_yield();
-#endif
native_mutex_lock(&mutex->lock);
th->transition_for_lock = 0;
while (mutex->th || (mutex->th = th, 0)) {
- if (last_thread) {
- interrupted = 2;
- break;
- }
+ struct timespec ts;
+ int ret;
mutex->cond_waiting++;
- native_cond_wait(&mutex->cond, &mutex->lock);
+ if (timeout_ms) {
+ ts = init_lock_timeout(timeout_ms);
+ ret = native_cond_timedwait(&mutex->cond, &mutex->lock, &ts);
+ if (ret == ETIMEDOUT) {
+ interrupted = 2;
+ break;
+ }
+ }
+ else {
+ native_cond_wait(&mutex->cond, &mutex->lock);
+ }
mutex->cond_notified--;
if (RUBY_VM_INTERRUPTED(th)) {
@@ -3236,11 +3261,6 @@ lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
th->transition_for_lock = 1;
native_mutex_unlock(&mutex->lock);
- if (interrupted == 2) native_thread_yield();
-#if 0 /* for debug */
- native_thread_yield();
-#endif
-
return interrupted;
}
@@ -3280,20 +3300,26 @@ rb_mutex_lock(VALUE self)
while (mutex->th != th) {
int interrupted;
enum rb_thread_status prev_status = th->status;
- int last_thread = 0;
+ int timeout_ms = 0;
struct rb_unblock_callback oldubf;
set_unblock_function(th, lock_interrupt, mutex, &oldubf);
th->status = THREAD_STOPPED_FOREVER;
th->vm->sleeper++;
th->locking_mutex = self;
+
+ /*
+ * Careful! While some contended threads are in lock_func(),
+ * vm->sleeper is an unstable value. We have to avoid both
+ * deadlock and busy loop.
+ */
if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
- last_thread = 1;
+ timeout_ms = 100;
}
th->transition_for_lock = 1;
BLOCKING_REGION_CORE({
- interrupted = lock_func(th, mutex, last_thread);
+ interrupted = lock_func(th, mutex, timeout_ms);
});
th->transition_for_lock = 0;
remove_signal_thread_list(th);
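
[Editor's note: the heuristic above bounds the wait at 100ms only when the current thread appears to be the last runnable one, so the VM's deadlock check still runs, but the thread sleeps instead of returning immediately and spinning as the old last_thread path did. The tiny standalone program below, a hypothetical demo using raw pthreads rather than Ruby's wrappers, shows the bounded wait behaving that way when nothing ever signals the condition.]

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/time.h>

int
main(void)
{
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    struct timeval tv;
    struct timespec ts;
    int ret;

    /* Absolute deadline 100ms from now, as init_lock_timeout() computes. */
    gettimeofday(&tv, NULL);
    ts.tv_sec = tv.tv_sec;
    ts.tv_nsec = tv.tv_usec * 1000 + 100 * 1000 * 1000;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }

    pthread_mutex_lock(&lock);
    ret = pthread_cond_timedwait(&cond, &lock, &ts); /* nobody signals */
    pthread_mutex_unlock(&lock);

    /* Expect ETIMEDOUT after roughly 100ms: the thread slept instead of
     * spinning, and woke in time to re-check for deadlock. */
    printf("%s\n", ret == ETIMEDOUT ? "ETIMEDOUT" : "signaled");
    return 0;
}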