diff options
author | k0kubun <k0kubun@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2019-03-18 17:20:21 +0000 |
---|---|---|
committer | k0kubun <k0kubun@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2019-03-18 17:20:21 +0000 |
commit | a75ae6faa931e8b9d32c99039a06b950df391a06 (patch) | |
tree | 967f7f2d95124d5b7ba2b1793d214b49bdad25c7 | |
parent | 4d426e28d2cbbc09208987c6cf86180ea3ba3522 (diff) | |
download | ruby-a75ae6faa931e8b9d32c99039a06b950df391a06.tar.gz |
Use alloca again instead of malloc and free
by changing the interface of `mjit_copy_cache_from_main_thread`.
This also fixes a deadlock introduced by r67299.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@67300 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
-rw-r--r-- | mjit_compile.c | 16 | ||||
-rw-r--r-- | mjit_worker.c | 42 |
2 files changed, 25 insertions, 33 deletions
diff --git a/mjit_compile.c b/mjit_compile.c index 4ad083694e..7e3d4913a8 100644 --- a/mjit_compile.c +++ b/mjit_compile.c @@ -196,7 +196,7 @@ compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct fprintf(f, " return Qundef;\n"); } -extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache **cc_entries, union iseq_inline_storage_entry **is_entries); +extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries); // Compile ISeq to C code in `f`. It returns true if it succeeds to compile. bool @@ -211,8 +211,18 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname) if (status.stack_size_for_pos == NULL) return false; memset(status.stack_size_for_pos, NOT_COMPILED_STACK_SIZE, sizeof(int) * body->iseq_size); - if (mjit_copy_cache_from_main_thread(iseq, &status.cc_entries, &status.is_entries) == false) + + status.cc_entries = NULL; + if ((body->ci_size + body->ci_kw_size) > 0) + status.cc_entries = alloca(sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size)); + status.is_entries = NULL; + if (body->is_size > 0) + status.is_entries = alloca(sizeof(union iseq_inline_storage_entry) * body->is_size); + + if ((status.cc_entries != NULL || status.is_entries != NULL) + && !mjit_copy_cache_from_main_thread(iseq, status.cc_entries, status.is_entries)) { return false; + } /* For performance, we verify stack size only on compilation time (mjit_compile.inc.erb) without --jit-debug */ if (!mjit_opts.debug) { @@ -252,8 +262,6 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname) fprintf(f, "\n} /* end of %s */\n", funcname); free(status.stack_size_for_pos); - free(status.cc_entries); - free(status.is_entries); return status.success; } diff --git a/mjit_worker.c b/mjit_worker.c index a7e6bcea65..5c1f24b329 100644 --- a/mjit_worker.c +++ b/mjit_worker.c @@ -1141,13 +1141,14 @@ static void 
mjit_copy_job_handler(void *data); /* vm_trace.c */ int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *); -// Copy inline cache values of `iseq` to `*cc_entries` and `*is_entries`. +// Copy inline cache values of `iseq` to `cc_entries` and `is_entries`. +// These buffers should be pre-allocated properly prior to calling this function. // Return true if copy succeeds or is not needed. // // We're lazily copying cache values from main thread because these cache values // could be different between ones on enqueue timing and ones on dequeue timing. bool -mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache **cc_entries, union iseq_inline_storage_entry **is_entries) +mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries) { mjit_copy_job_t *job = &mjit_copy_job; // just a short hand @@ -1155,30 +1156,15 @@ mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache **c job->finish_p = true; // disable dispatching this job in mjit_copy_job_handler while it's being modified CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread"); - const struct rb_iseq_constant_body *body = iseq->body; - job->cc_entries = NULL; - if (body->ci_size > 0 || body->ci_kw_size > 0) { - job->cc_entries = malloc(sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size)); - if (!job->cc_entries) return false; - } - job->is_entries = NULL; - if (body->is_size > 0) { - job->is_entries = malloc(sizeof(union iseq_inline_storage_entry) * body->is_size); - if (!job->is_entries) { - free(job->cc_entries); - return false; - } - } - - // If ISeq has no inline cache, there's no need to run a copy job. 
- if (job->cc_entries == NULL && job->is_entries == NULL) { - *cc_entries = job->cc_entries; - *is_entries = job->is_entries; - return true; - } + job->cc_entries = cc_entries; + job->is_entries = is_entries; CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread"); job->iseq = iseq; // Prevernt GC of this ISeq from here + VM_ASSERT(in_jit); + in_jit = false; // To avoid deadlock, allow running GC while waiting for copy job + rb_native_cond_signal(&mjit_client_wakeup); // Unblock main thread waiting in `mjit_gc_start_hook` + job->finish_p = false; // allow dispatching this job in mjit_copy_job_handler CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread"); @@ -1196,16 +1182,14 @@ mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache **c CRITICAL_SECTION_FINISH(3, "in MJIT copy job wait"); } - // Set result values. - *cc_entries = job->cc_entries; - *is_entries = job->is_entries; - - bool result = job->finish_p; CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread"); - job->iseq = NULL; // Allow GC of this ISeq from here + bool result = job->finish_p; // Disable dispatching this job in mjit_copy_job_handler while memory allocated by alloca // could be expired after finishing this function. job->finish_p = true; + + in_jit = true; // Prohibit GC during JIT compilation + job->iseq = NULL; // Allow future GC of this ISeq from here CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread"); return result; } |