author     Koichi Sasada <ko1@atdot.net>    2020-02-25 11:03:17 +0900
committer  Koichi Sasada <ko1@atdot.net>    2020-02-25 12:57:10 +0900
commit     7ec23593746c8ccabd6c005cc34dde77d564c6c9 (patch)
tree       b1979d603edb1808217db5d2b632a9ed24be41b3 /mjit.c
parent     82d27604adba94e147c1e848f80329a8286bde5c (diff)
prevent GC from mjit worker.
ALLOC_N() can cause GC. `mjit_copy_job_handler()` can sometimes be called from the mjit_worker thread, which is not a Ruby thread, so we need to prevent GC in this function. This patch has some issues, but I introduce it to pass the tests.
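As a rough sketch of the pattern (not Ruby's actual code: prepare_unit(), copy_job_run(), struct cc_slot, and struct unit are hypothetical names, and plain calloc() stands in for ALLOC_N()), the idea is that the Ruby thread pre-allocates the destination buffer, so the worker thread only copies pointers and never allocates, and therefore never triggers GC off the Ruby thread.

/* Minimal sketch with hypothetical names; calloc() stands in for ALLOC_N(). */
#include <stdlib.h>

struct cc_slot { const void *cc; };      /* stand-in for the per-call-site data */

struct unit {
    const void **cc_entries;             /* filled in later by the worker thread */
    unsigned int ci_size;
};

/* Ruby thread: allocation is safe here, so reserve the buffer up front. */
static void
prepare_unit(struct unit *u, unsigned int ci_size)
{
    u->ci_size = ci_size;
    u->cc_entries = ci_size > 0 ? calloc(ci_size, sizeof(*u->cc_entries)) : NULL;
}

/* MJIT worker thread (not a Ruby thread): only copies into memory that
 * already exists, so nothing here can trigger GC. */
static void
copy_job_run(struct unit *u, const struct cc_slot *call_data)
{
    for (unsigned int i = 0; i < u->ci_size; i++) {
        u->cc_entries[i] = call_data[i].cc;
    }
}

In the patch itself, the allocation moves into create_unit() (second hunk below), and mjit_copy_job_handler() (first hunk) only asserts that the buffer already exists before copying.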
Diffstat (limited to 'mjit.c')
-rw-r--r--  mjit.c  15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/mjit.c b/mjit.c
index d3cb063ff9..bcf773d2d7 100644
--- a/mjit.c
+++ b/mjit.c
@@ -54,13 +54,13 @@ mjit_copy_job_handler(void *data)
     }
 
     const struct rb_iseq_constant_body *body = job->iseq->body;
-    unsigned int ci_size = body->ci_size;
+    const unsigned int ci_size = body->ci_size;
     if (ci_size > 0) {
-        const struct rb_callcache **cc_entries = ALLOC_N(const struct rb_callcache *, ci_size);
-        if (body->jit_unit == NULL) {
-            create_unit(job->iseq);
-        }
-        body->jit_unit->cc_entries = cc_entries;
+        VM_ASSERT(body->jit_unit != NULL);
+        VM_ASSERT(body->jit_unit->cc_entries != NULL);
+
+        const struct rb_callcache **cc_entries = body->jit_unit->cc_entries;
+
         for (unsigned int i=0; i<ci_size; i++) {
             cc_entries[i] = body->call_data[i].cc;
         }
@@ -294,6 +294,9 @@ create_unit(const rb_iseq_t *iseq)
 
     unit->id = current_unit_num++;
     unit->iseq = (rb_iseq_t *)iseq;
+    if (iseq->body->ci_size > 0) {
+        unit->cc_entries = ALLOC_N(const struct rb_callcache *, iseq->body->ci_size);
+    }
     iseq->body->jit_unit = unit;
 }