aboutsummaryrefslogtreecommitdiffstats
path: root/mjit.c
diff options
context:
space:
mode:
authorTakashi Kokubun <takashikkbn@gmail.com>2020-11-27 22:46:01 -0800
committerTakashi Kokubun <takashikkbn@gmail.com>2020-11-27 23:06:40 -0800
commit122cd35939ddf8ef7bfa17ad75570c01d0cf06ab (patch)
tree99312201b420877bb73698133e57aff3114b7cbd /mjit.c
parent95bef7b69a6fb42687a6200b338060be307259f5 (diff)
downloadruby-122cd35939ddf8ef7bfa17ad75570c01d0cf06ab.tar.gz
Throttle unload_units
Because d80226e7bd often reduces the number of unloaded units, it increases the number of unload_units calls, which are heavy. To mitigate that, this throttles unload_units per `max_cache_size / 10`. Also hoping to fix https://ci.appveyor.com/project/ruby/ruby/builds/36552382/job/kjmjgw9cjyf2ksd7
Diffstat (limited to 'mjit.c')
-rw-r--r--mjit.c2
1 file changed, 1 insertion, 1 deletion
diff --git a/mjit.c b/mjit.c
index 94857df524..c65355fba9 100644
--- a/mjit.c
+++ b/mjit.c
@@ -261,7 +261,7 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_inf
CRITICAL_SECTION_START(3, "in add_iseq_to_process");
add_to_list(iseq->body->jit_unit, &unit_queue);
if (active_units.length >= mjit_opts.max_cache_size) {
- unload_units_p = true;
+ unload_requests++;
}
verbose(3, "Sending wakeup signal to workers in mjit_add_iseq_to_process");
rb_native_cond_broadcast(&mjit_worker_wakeup);