From b9007b6c548f91e88fd3f2ffa23de740431fa969 Mon Sep 17 00:00:00 2001 From: Koichi Sasada Date: Wed, 8 Jan 2020 16:14:01 +0900 Subject: Introduce disposable call-cache. This patch contains several ideas: (1) Disposable inline method cache (IMC) for race-free inline method cache * Making the call-cache (CC) an RVALUE (GC target object) and allocating a new CC on cache miss. * This technique allows race-free access from parallel processing elements like RCU. (2) Introduce per-Class method cache (pCMC) * Instead of a fixed-size global method cache (GMC), pCMC allows flexible cache size. * Caching CCs reduces CC allocation and allows sharing a CC's fast-path between call-sites with the same call-info (CI). (3) Invalidate an inline method cache by invalidating corresponding method entries (MEs) * Instead of using class serials, we set an "invalidated" flag on the method entry itself to represent cache invalidation. * Compared with using class serials, the impact of method modification (add/overwrite/delete) is small. * Updating class serials invalidates all method caches of the class and its sub-classes. * The proposed approach invalidates the method cache of only one ME. See [Feature #16614] for more details. 
--- iseq.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'iseq.c') diff --git a/iseq.c b/iseq.c index 867bbc0d63..c6c5c6e127 100644 --- a/iseq.c +++ b/iseq.c @@ -247,6 +247,7 @@ rb_iseq_update_references(rb_iseq_t *iseq) if (!SPECIAL_CONST_P(cds[i].ci)) { cds[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)cds[i].ci); } + cds[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)cds[i].cc); } } if (FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) { @@ -323,6 +324,11 @@ rb_iseq_mark(const rb_iseq_t *iseq) struct rb_call_data *cds = (struct rb_call_data *)body->call_data; for (unsigned int i=0; i<body->ci_size; i++) { rb_gc_mark_movable((VALUE)cds[i].ci); + const struct rb_callcache *cc = cds[i].cc; + if (cc && vm_cc_markable(cds[i].cc)) { + rb_gc_mark_movable((VALUE)cc); + // TODO: check enable + } } } @@ -351,6 +357,14 @@ rb_iseq_mark(const rb_iseq_t *iseq) } } } + + if (body->jit_unit && body->jit_unit->cc_entries != NULL) { + // TODO: move to mjit.c? + for (unsigned int i=0; i<body->ci_size; i++) { + const struct rb_callcache *cc = body->jit_unit->cc_entries[i]; + rb_gc_mark((VALUE)cc); // pindown + } + } } if (FL_TEST_RAW(iseq, ISEQ_NOT_LOADED_YET)) { @@ -663,6 +677,9 @@ finish_iseq_build(rb_iseq_t *iseq) rb_exc_raise(err); } + RB_DEBUG_COUNTER_INC(iseq_num); + RB_DEBUG_COUNTER_ADD(iseq_cd_num, iseq->body->ci_size); + rb_iseq_init_trace(iseq); return Qtrue; } -- cgit v1.2.3