aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAaron Patterson <tenderlove@ruby-lang.org>2020-10-21 13:16:15 -0700
committerAaron Patterson <aaron.patterson@gmail.com>2020-10-22 07:59:06 -0700
commitabf678a4397c6c00a1bb686043e377d372e695a4 (patch)
treeb29918f9bd80cfeec14a1e9dbc716d379f0cb504
parentd23d5c3130a0944e7e591370cbf8599009318a7c (diff)
downloadruby-abf678a4397c6c00a1bb686043e377d372e695a4.tar.gz
Use a lock level for a less granular lock.
We are seeing an error where code that is generated with MJIT contains references to objects that have been moved. I believe this is due to a race condition in the compaction function.

`gc_compact` has two steps:

1. Run a full GC to pin objects
2. Compact / update references

Step one is executed with `garbage_collect`. `garbage_collect` calls `gc_enter` / `gc_exit`; these functions acquire and release a JIT lock, so the lock is held for the duration of step one. Step two is executed by `gc_compact_after_gc`. It also holds a JIT lock.

I believe the problem is that the JIT is free to execute between step one and step two. It copies call cache values, but doesn't pin them when it copies them. So the compactor thinks it's OK to move the call cache even though it is not safe.

We need to hold a lock for the duration of `garbage_collect` *and* `gc_compact_after_gc`. This patch introduces a lock level which increments and decrements. The compaction function can increment and decrement the lock level and prevent MJIT from executing during both steps.
-rw-r--r--gc.c2
-rw-r--r--mjit.c13
-rw-r--r--mjit_worker.c4
3 files changed, 13 insertions, 6 deletions
diff --git a/gc.c b/gc.c
index 6c3fc50229..c97246a9cc 100644
--- a/gc.c
+++ b/gc.c
@@ -8834,10 +8834,12 @@ gc_compact(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages,
objspace->flags.during_compacting = TRUE;
{
+ mjit_gc_start_hook();
/* pin objects referenced by maybe pointers */
garbage_collect(objspace, GPR_DEFAULT_REASON);
/* compact */
gc_compact_after_gc(objspace, use_toward_empty, use_double_pages, use_verifier);
+ mjit_gc_exit_hook();
}
objspace->flags.during_compacting = FALSE;
}
diff --git a/mjit.c b/mjit.c
index b6abcf4f1d..ab74a066d9 100644
--- a/mjit.c
+++ b/mjit.c
@@ -96,7 +96,7 @@ mjit_gc_start_hook(void)
rb_native_cond_wait(&mjit_client_wakeup, &mjit_engine_mutex);
verbose(4, "Getting wakeup from a worker for GC");
}
- in_gc = true;
+ in_gc++;
CRITICAL_SECTION_FINISH(4, "mjit_gc_start_hook");
}
@@ -108,9 +108,14 @@ mjit_gc_exit_hook(void)
if (!mjit_enabled)
return;
CRITICAL_SECTION_START(4, "mjit_gc_exit_hook");
- in_gc = false;
- verbose(4, "Sending wakeup signal to workers after GC");
- rb_native_cond_broadcast(&mjit_gc_wakeup);
+ in_gc--;
+ if (in_gc < 0) { // Don't allow underflow
+ in_gc = 0;
+ }
+ if (!in_gc) {
+ verbose(4, "Sending wakeup signal to workers after GC");
+ rb_native_cond_broadcast(&mjit_gc_wakeup);
+ }
CRITICAL_SECTION_FINISH(4, "mjit_gc_exit_hook");
}
diff --git a/mjit_worker.c b/mjit_worker.c
index 461b10f0ef..63f976a2b9 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -220,8 +220,8 @@ static rb_nativethread_cond_t mjit_client_wakeup;
static rb_nativethread_cond_t mjit_worker_wakeup;
// A thread conditional to wake up workers if at the end of GC.
static rb_nativethread_cond_t mjit_gc_wakeup;
-// True when GC is working.
-static bool in_gc = false;
+// Greater than 0 when GC is working.
+static int in_gc = 0;
// True when JIT is working.
static bool in_jit = false;
// True when JIT compaction is running.