author     Koichi Sasada <ko1@atdot.net>  2020-01-08 16:14:01 +0900
committer  Koichi Sasada <ko1@atdot.net>  2020-02-22 09:58:59 +0900
commit     b9007b6c548f91e88fd3f2ffa23de740431fa969 (patch)
tree       1746393d1c5f704e8dc7e0a458198264062273bf
parent     f2286925f08406bc857f7b03ad6779a5d61443ae (diff)
download   ruby-b9007b6c548f91e88fd3f2ffa23de740431fa969.tar.gz
Introduce disposable call-cache.
This patch contains several ideas:

(1) Disposable inline method cache (IMC) for a race-free inline method cache
    * Make the call-cache (CC) an RVALUE (a GC-managed object) and allocate a
      new CC on every cache miss.
    * This technique allows race-free access from parallel processing
      elements, in the spirit of RCU.

(2) Introduce a per-class method cache (pCMC)
    * Instead of the fixed-size global method cache (GMC), the pCMC allows a
      flexible cache size.
    * Caching CCs reduces CC allocation and allows sharing a CC's fast-path
      between call-sites with the same call-info (CI).

(3) Invalidate an inline method cache by invalidating the corresponding
    method entries (MEs)
    * Instead of using class serials, an "invalidated" flag is set on the
      method entry itself to represent cache invalidation.
    * Compared with class serials, the impact of a method modification
      (add/overwrite/delete) is small.
    * Updating class serials invalidates all method caches of the class and
      its subclasses, whereas the proposed approach invalidates only the
      caches of the single affected ME.

See [Feature #16614] for more details.
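For orientation, here is a minimal, self-contained C sketch of the disposable-cache idea from item (1) and the flag-based invalidation from item (3). It is not the CRuby implementation: the names method_entry, call_cache, cc_new and cc_valid_p are illustrative stand-ins for the patch's rb_callable_method_entry_t, struct rb_callcache, vm_cc_new() and vm_cc_valid_p(). The point is that a call site never mutates a published cache entry; a miss allocates a fresh entry, and redefining a method sets a flag on the method entry instead of bumping a class-wide serial.

/* Illustrative sketch only -- not the CRuby API. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct method_entry {
    const char *name;
    bool invalidated;               /* set when the method is redefined */
};

struct call_cache {                 /* never mutated after publication */
    const void *klass;              /* receiver class captured at fill time */
    const struct method_entry *me;
};

static const struct call_cache *
cc_new(const void *klass, const struct method_entry *me)
{
    struct call_cache *cc = malloc(sizeof(*cc));
    cc->klass = klass;
    cc->me = me;
    return cc;                      /* stale entries would be reclaimed by the GC in CRuby */
}

static bool
cc_valid_p(const struct call_cache *cc, const void *klass)
{
    return cc != NULL && cc->klass == klass && !cc->me->invalidated;
}

int
main(void)
{
    int dummy_class;                         /* stand-in for a class object */
    const void *klass = &dummy_class;
    struct method_entry foo = { "foo", false };
    const struct call_cache *cc = NULL;      /* the call site's inline cache */

    for (int i = 0; i < 3; i++) {
        if (!cc_valid_p(cc, klass)) {        /* miss: allocate a fresh CC */
            cc = cc_new(klass, &foo);
            printf("call %d: miss, new cc %p\n", i, (void *)cc);
        }
        else {                               /* hit: reuse the same CC */
            printf("call %d: hit,  cc %p\n", i, (void *)cc);
        }
        if (i == 1) foo.invalidated = true;  /* simulate redefining foo */
    }
    return 0;
}

Because an entry is never written after it becomes visible, a reader racing with a writer can only observe either the old pointer or the new one, never a half-updated entry; in CRuby each CC is an imemo_callcache RVALUE, so stale entries are reclaimed by the GC rather than leaked as in this sketch.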
-rw-r--r--  class.c                                      45
-rw-r--r--  common.mk                                     1
-rw-r--r--  compile.c                                    38
-rw-r--r--  debug_counter.h                              92
-rw-r--r--  eval.c                                        2
-rw-r--r--  ext/objspace/objspace.c                       1
-rw-r--r--  gc.c                                        204
-rw-r--r--  id_table.c                                    2
-rw-r--r--  insns.def                                    13
-rw-r--r--  internal/class.h                              2
-rw-r--r--  internal/imemo.h                              4
-rw-r--r--  internal/vm.h                                41
-rw-r--r--  iseq.c                                       17
-rw-r--r--  method.h                                     11
-rw-r--r--  mjit.c                                       19
-rw-r--r--  mjit.h                                       29
-rw-r--r--  mjit_compile.c                               42
-rw-r--r--  mjit_worker.c                                30
-rw-r--r--  test/-ext-/tracepoint/test_tracepoint.rb     12
-rw-r--r--  test/ruby/test_gc.rb                          3
-rw-r--r--  test/ruby/test_inlinecache.rb                64
-rw-r--r--  tool/mk_call_iseq_optimized.rb                2
-rw-r--r--  tool/ruby_vm/views/_mjit_compile_send.erb    23
-rw-r--r--  tool/ruby_vm/views/mjit_compile.inc.erb       2
-rw-r--r--  vm.c                                         26
-rw-r--r--  vm_callinfo.h                               235
-rw-r--r--  vm_core.h                                     3
-rw-r--r--  vm_dump.c                                     4
-rw-r--r--  vm_eval.c                                    50
-rw-r--r--  vm_insnhelper.c                             814
-rw-r--r--  vm_insnhelper.h                              15
-rw-r--r--  vm_method.c                                 630
32 files changed, 1606 insertions, 870 deletions
diff --git a/class.c b/class.c
index f181f33690..98b2a1d92e 100644
--- a/class.c
+++ b/class.c
@@ -894,12 +894,21 @@ add_refined_method_entry_i(ID key, VALUE value, void *data)
static void ensure_origin(VALUE klass);
+static enum rb_id_table_iterator_result
+clear_module_cache_i(ID id, VALUE val, void *data)
+{
+ VALUE klass = (VALUE)data;
+ rb_clear_method_cache(klass, id);
+ return ID_TABLE_CONTINUE;
+}
+
static int
include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
{
VALUE p, iclass;
int method_changed = 0, constant_changed = 0;
struct rb_id_table *const klass_m_tbl = RCLASS_M_TBL(RCLASS_ORIGIN(klass));
+ VALUE original_klass = klass;
if (FL_TEST(module, RCLASS_REFINED_BY_ANY)) {
ensure_origin(module);
@@ -912,7 +921,7 @@ include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
if (klass_m_tbl && klass_m_tbl == RCLASS_M_TBL(module))
return -1;
/* ignore if the module included already in superclasses */
- for (p = RCLASS_SUPER(klass); p; p = RCLASS_SUPER(p)) {
+ for (p = RCLASS_SUPER(klass); p; p = RCLASS_SUPER(p)) {
int type = BUILTIN_TYPE(p);
if (type == T_ICLASS) {
if (RCLASS_M_TBL(p) == RCLASS_M_TBL(module)) {
@@ -924,37 +933,53 @@ include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super)
}
else if (type == T_CLASS) {
if (!search_super) break;
- superclass_seen = TRUE;
+ superclass_seen = TRUE;
}
}
- iclass = rb_include_class_new(module, RCLASS_SUPER(c));
+
+ VALUE super_class = RCLASS_SUPER(c);
+
+ // invalidate inline method cache
+ tbl = RMODULE_M_TBL(module);
+ if (tbl && rb_id_table_size(tbl)) {
+ if (search_super) { // include
+ if (super_class && !RB_TYPE_P(super_class, T_MODULE)) {
+ rb_id_table_foreach(tbl, clear_module_cache_i, (void *)super_class);
+ }
+ }
+ else { // prepend
+ if (!RB_TYPE_P(original_klass, T_MODULE)) {
+ rb_id_table_foreach(tbl, clear_module_cache_i, (void *)original_klass);
+ }
+ }
+ method_changed = 1;
+ }
+
+ // setup T_ICLASS for the include/prepend module
+ iclass = rb_include_class_new(module, super_class);
c = RCLASS_SET_SUPER(c, iclass);
RCLASS_SET_INCLUDER(iclass, klass);
{
VALUE m = module;
- if (BUILTIN_TYPE(m) == T_ICLASS) m = RBASIC(m)->klass;
- rb_module_add_to_subclasses_list(m, iclass);
+ if (BUILTIN_TYPE(m) == T_ICLASS) m = RBASIC(m)->klass;
+ rb_module_add_to_subclasses_list(m, iclass);
}
if (FL_TEST(klass, RMODULE_IS_REFINEMENT)) {
VALUE refined_class =
rb_refinement_module_get_refined_class(klass);
- rb_id_table_foreach(RMODULE_M_TBL(module), add_refined_method_entry_i, (void *)refined_class);
+ rb_id_table_foreach(RMODULE_M_TBL(module), add_refined_method_entry_i, (void *)refined_class);
FL_SET(c, RMODULE_INCLUDED_INTO_REFINEMENT);
}
- tbl = RMODULE_M_TBL(module);
- if (tbl && rb_id_table_size(tbl)) method_changed = 1;
-
tbl = RMODULE_CONST_TBL(module);
if (tbl && rb_id_table_size(tbl)) constant_changed = 1;
skip:
module = RCLASS_SUPER(module);
}
- if (method_changed) rb_clear_method_cache_by_class(klass);
if (constant_changed) rb_clear_constant_cache();
return method_changed;
diff --git a/common.mk b/common.mk
index 50f2c156a6..5680573bd2 100644
--- a/common.mk
+++ b/common.mk
@@ -2946,6 +2946,7 @@ mjit.$(OBJEXT): {$(VPATH)}thread.h
mjit.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
mjit.$(OBJEXT): {$(VPATH)}thread_native.h
mjit.$(OBJEXT): {$(VPATH)}util.h
+mjit.$(OBJEXT): {$(VPATH)}vm_callinfo.h
mjit.$(OBJEXT): {$(VPATH)}vm_core.h
mjit.$(OBJEXT): {$(VPATH)}vm_opts.h
mjit_compile.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
diff --git a/compile.c b/compile.c
index e1efbcd909..94daa65b78 100644
--- a/compile.c
+++ b/compile.c
@@ -566,6 +566,8 @@ static void
verify_call_cache(rb_iseq_t *iseq)
{
#if CPDEBUG
+ // fprintf(stderr, "ci_size:%d\t", iseq->body->ci_size); rp(iseq);
+
VALUE *original = rb_iseq_original_iseq(iseq);
size_t i = 0;
while (i < iseq->body->iseq_size) {
@@ -574,16 +576,27 @@ verify_call_cache(rb_iseq_t *iseq)
for (int j=0; types[j]; j++) {
if (types[j] == TS_CALLDATA) {
- struct rb_call_cache cc;
struct rb_call_data *cd = (struct rb_call_data *)original[i+j+1];
- MEMZERO(&cc, cc, 1);
- if (memcmp(&cc, &cd->cc, sizeof(cc))) {
- rb_bug("call cache not zero for fresh iseq");
+ const struct rb_callinfo *ci = cd->ci;
+ const struct rb_callcache *cc = cd->cc;
+ if (cc != vm_cc_empty()) {
+ vm_ci_dump(ci);
+ rb_bug("call cache is not initialized by vm_cc_empty()");
}
}
}
i += insn_len(insn);
}
+
+ for (unsigned int i=0; i<iseq->body->ci_size; i++) {
+ struct rb_call_data *cd = &iseq->body->call_data[i];
+ const struct rb_callinfo *ci = cd->ci;
+ const struct rb_callcache *cc = cd->cc;
+ if (cc != NULL && cc != vm_cc_empty()) {
+ vm_ci_dump(ci);
+ rb_bug("call cache is not initialized by vm_cc_empty()");
+ }
+ }
#endif
}
@@ -661,7 +674,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, const NODE *node)
DECL_ANCHOR(ret);
INIT_ANCHOR(ret);
- if (imemo_type_p((VALUE)node, imemo_ifunc)) {
+ if (IMEMO_TYPE_P(node, imemo_ifunc)) {
rb_raise(rb_eArgError, "unexpected imemo_ifunc");
}
@@ -1212,6 +1225,7 @@ new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_cal
argc += kw_arg->keyword_len;
}
+ // fprintf(stderr, "[%d] id:%s\t", (int)iseq->body->ci_size, rb_id2name(mid)); rp(iseq);
iseq->body->ci_size++;
const struct rb_callinfo *ci = vm_ci_new(mid, flag, argc, kw_arg);
RB_OBJ_WRITTEN(iseq, Qundef, ci);
@@ -2223,6 +2237,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
struct rb_call_data *cd = &body->call_data[ISEQ_COMPILE_DATA(iseq)->ci_index++];
assert(ISEQ_COMPILE_DATA(iseq)->ci_index <= body->ci_size);
cd->ci = source_ci;
+ cd->cc = vm_cc_empty();
generated_iseq[code_index + 1 + j] = (VALUE)cd;
break;
}
@@ -10301,16 +10316,18 @@ ibf_dump_ci_entries(struct ibf_dump *dump, const rb_iseq_t *iseq)
}
/* note that we dump out rb_call_info but load back rb_call_data */
-static struct rb_call_data *
+static void
ibf_load_ci_entries(const struct ibf_load *load,
ibf_offset_t ci_entries_offset,
- unsigned int ci_size)
+ unsigned int ci_size,
+ struct rb_call_data **cd_ptr)
{
ibf_offset_t reading_pos = ci_entries_offset;
unsigned int i;
struct rb_call_data *cds = ZALLOC_N(struct rb_call_data, ci_size);
+ *cd_ptr = cds;
for (i = 0; i < ci_size; i++) {
VALUE mid_index = ibf_load_small_value(load, &reading_pos);
@@ -10331,10 +10348,9 @@ ibf_load_ci_entries(const struct ibf_load *load,
cds[i].ci = vm_ci_new(mid, flag, argc, kwarg);
RB_OBJ_WRITTEN(load->iseq, Qundef, cds[i].ci);
+ cds[i].cc = vm_cc_empty();
}
-
- return cds;
-}
+}
static ibf_offset_t
ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq)
@@ -10588,7 +10604,7 @@ ibf_load_iseq_each(struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t offset)
load_body->catch_except_p = catch_except_p;
load_body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, is_size);
- load_body->call_data = ibf_load_ci_entries(load, ci_entries_offset, ci_size);
+ ibf_load_ci_entries(load, ci_entries_offset, ci_size, &load_body->call_data);
load_body->param.opt_table = ibf_load_param_opt_table(load, param_opt_table_offset, param_opt_num);
load_body->param.keyword = ibf_load_param_keyword(load, param_keyword_offset);
load_body->param.flags.has_kw = (param_flags >> 4) & 1;
diff --git a/debug_counter.h b/debug_counter.h
index 19909fbb29..e38b5c6b79 100644
--- a/debug_counter.h
+++ b/debug_counter.h
@@ -14,46 +14,45 @@
#ifdef RB_DEBUG_COUNTER
-/*
- * method cache (mc) counts.
- *
- * * mc_inline_hit/miss: inline mc hit/miss counts (VM send insn)
- * * mc_global_hit/miss: global method cache hit/miss counts
- * two types: (1) inline cache miss (VM send insn)
- * (2) called from C (rb_funcall).
- * * mc_global_state_miss: inline mc miss by global_state miss.
- * * mc_class_serial_miss: ... by mc_class_serial_miss
- * * mc_cme_complement: callable_method_entry complement counts.
- * * mc_cme_complement_hit: callable_method_entry cache hit counts.
- * * mc_search_super: search_method() call counts.
- * * mc_miss_by_nome: inline mc miss by no ment.
- * * mc_miss_by_distinct: ... by distinct ment.
- * * mc_miss_by_refine: ... by ment being refined.
- * * mc_miss_by_visi: ... by visibility change.
- * * mc_miss_spurious: spurious inline mc misshit.
- * * mc_miss_reuse_call: count of reuse of cc->call.
- */
-RB_DEBUG_COUNTER(mc_inline_hit)
-RB_DEBUG_COUNTER(mc_inline_miss)
-RB_DEBUG_COUNTER(mc_global_hit)
-RB_DEBUG_COUNTER(mc_global_miss)
-RB_DEBUG_COUNTER(mc_global_state_miss)
-RB_DEBUG_COUNTER(mc_class_serial_miss)
-RB_DEBUG_COUNTER(mc_cme_complement)
-RB_DEBUG_COUNTER(mc_cme_complement_hit)
-RB_DEBUG_COUNTER(mc_search_super)
-RB_DEBUG_COUNTER(mc_miss_by_nome)
-RB_DEBUG_COUNTER(mc_miss_by_distinct)
-RB_DEBUG_COUNTER(mc_miss_by_refine)
-RB_DEBUG_COUNTER(mc_miss_by_visi)
-RB_DEBUG_COUNTER(mc_miss_spurious)
-RB_DEBUG_COUNTER(mc_miss_reuse_call)
+// method cache (IMC: inline method cache)
+RB_DEBUG_COUNTER(mc_inline_hit) // IMC hit
+RB_DEBUG_COUNTER(mc_inline_miss_klass) // IMC miss by different class
+RB_DEBUG_COUNTER(mc_inline_miss_invalidated) // IMC miss by invalidated ME
+RB_DEBUG_COUNTER(mc_cme_complement) // number of acquiring complement CME
+RB_DEBUG_COUNTER(mc_cme_complement_hit) // number of cache hits for complemented CME
+
+RB_DEBUG_COUNTER(mc_search) // count for method lookup in class tree
+RB_DEBUG_COUNTER(mc_search_notfound) // method lookup, but not found
+RB_DEBUG_COUNTER(mc_search_super) // total traversed classes
// callinfo
-RB_DEBUG_COUNTER(ci_packed)
-RB_DEBUG_COUNTER(ci_kw)
-RB_DEBUG_COUNTER(ci_nokw)
-RB_DEBUG_COUNTER(ci_runtime)
+RB_DEBUG_COUNTER(ci_packed) // number of packed CI
+RB_DEBUG_COUNTER(ci_kw) // non-packed CI w/ keywords
+RB_DEBUG_COUNTER(ci_nokw) // non-packed CI w/o keywords
+RB_DEBUG_COUNTER(ci_runtime) // creating temporary CI
+
+// callcache
+RB_DEBUG_COUNTER(cc_new) // number of CC
+RB_DEBUG_COUNTER(cc_temp) // dummy CC (stack-allocated)
+RB_DEBUG_COUNTER(cc_found_ccs) // count for CC lookup success in CCS
+
+RB_DEBUG_COUNTER(cc_ent_invalidate) // count for invalidating cc (cc->klass = 0)
+RB_DEBUG_COUNTER(cc_cme_invalidate) // count for invalidating CME
+
+RB_DEBUG_COUNTER(cc_invalidate_leaf) // count for invalidating klass if klass has no subclasses
+RB_DEBUG_COUNTER(cc_invalidate_leaf_ccs) // corresponding CCS
+RB_DEBUG_COUNTER(cc_invalidate_leaf_callable) // complemented cache (no subclasses)
+RB_DEBUG_COUNTER(cc_invalidate_tree) // count for invalidating klass if klass has subclasses
+RB_DEBUG_COUNTER(cc_invalidate_tree_cme) // CME, if the CME is found in this class or its superclasses
+RB_DEBUG_COUNTER(cc_invalidate_tree_callable) // complemented cache (subclasses)
+
+RB_DEBUG_COUNTER(ccs_free) // count for free'ing ccs
+RB_DEBUG_COUNTER(ccs_maxlen) // maximum length of ccs
+RB_DEBUG_COUNTER(ccs_found) // count for finding corresponding ccs on method lookup
+
+// iseq
+RB_DEBUG_COUNTER(iseq_num) // number of total created iseq
+RB_DEBUG_COUNTER(iseq_cd_num) // number of total created cd (call_data)
/*
* call cache fastpath usage
@@ -289,6 +288,7 @@ RB_DEBUG_COUNTER(obj_imemo_ifunc)
RB_DEBUG_COUNTER(obj_imemo_memo)
RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
RB_DEBUG_COUNTER(obj_imemo_callinfo)
+RB_DEBUG_COUNTER(obj_imemo_callcache)
/* ar_table */
RB_DEBUG_COUNTER(artable_hint_hit)
@@ -375,17 +375,33 @@ rb_debug_counter_add(enum rb_debug_counter_type type, int add, int cond)
return cond;
}
+inline static int
+rb_debug_counter_max(enum rb_debug_counter_type type, unsigned int num)
+{
+ if (rb_debug_counter[(int)type] < num) {
+ rb_debug_counter[(int)type] = num;
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
VALUE rb_debug_counter_reset(VALUE klass);
VALUE rb_debug_counter_show(VALUE klass);
#define RB_DEBUG_COUNTER_INC(type) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, 1)
#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (!rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, !(cond)))
#define RB_DEBUG_COUNTER_INC_IF(type, cond) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, (cond))
+#define RB_DEBUG_COUNTER_ADD(type, num) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, (num), 1)
+#define RB_DEBUG_COUNTER_SETMAX(type, num) rb_debug_counter_max(RB_DEBUG_COUNTER_##type, (unsigned int)(num))
#else
#define RB_DEBUG_COUNTER_INC(type) ((void)0)
#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (cond)
#define RB_DEBUG_COUNTER_INC_IF(type, cond) (cond)
+#define RB_DEBUG_COUNTER_ADD(type, num) ((void)0)
+#define RB_DEBUG_COUNTER_SETMAX(type, num) 0
#endif
void rb_debug_counter_show_results(const char *msg);
diff --git a/eval.c b/eval.c
index 429ecbe0d9..d5154b7db1 100644
--- a/eval.c
+++ b/eval.c
@@ -1476,7 +1476,7 @@ rb_using_module(const rb_cref_t *cref, VALUE module)
{
Check_Type(module, T_MODULE);
using_module_recursive(cref, module);
- rb_clear_method_cache_by_class(rb_cObject);
+ rb_clear_method_cache_all();
}
/*! \private */
diff --git a/ext/objspace/objspace.c b/ext/objspace/objspace.c
index 38d3d2fcff..dc1a0cb08f 100644
--- a/ext/objspace/objspace.c
+++ b/ext/objspace/objspace.c
@@ -638,6 +638,7 @@ count_imemo_objects(int argc, VALUE *argv, VALUE self)
imemo_type_ids[9] = rb_intern("imemo_ast");
imemo_type_ids[10] = rb_intern("imemo_parser_strterm");
imemo_type_ids[11] = rb_intern("imemo_callinfo");
+ imemo_type_ids[12] = rb_intern("imemo_callcache");
}
rb_objspace_each_objects(count_imemo_objects_i, (void *)hash);
diff --git a/gc.c b/gc.c
index ad93937ed8..21c93b165e 100644
--- a/gc.c
+++ b/gc.c
@@ -2530,6 +2530,116 @@ rb_free_const_table(struct rb_id_table *tbl)
rb_id_table_free(tbl);
}
+// alive: if false, the target pointers may already have been freed.
+// To check that, we need the objspace parameter.
+static void
+vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
+{
+ if (ccs->entries) {
+ for (int i=0; i<ccs->len; i++) {
+ const struct rb_callcache *cc = ccs->entries[i].cc;
+ if (!alive) {
+ // ccs can be free'ed.
+ if (is_pointer_to_heap(objspace, (void *)cc) &&
+ IMEMO_TYPE_P(cc, imemo_callcache) &&
+ cc->klass == klass) {
+ // OK. maybe target cc.
+ }
+ else {
+ continue;
+ }
+ }
+ vm_cc_invalidate(cc);
+ }
+ ruby_xfree(ccs->entries);
+ }
+ ruby_xfree(ccs);
+}
+
+void
+rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
+{
+ RB_DEBUG_COUNTER_INC(ccs_free);
+ vm_ccs_free(ccs, TRUE, NULL, Qundef);
+}
+
+struct cc_tbl_i_data {
+ rb_objspace_t *objspace;
+ VALUE klass;
+ bool alive;
+};
+
+static enum rb_id_table_iterator_result
+cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
+{
+ struct cc_tbl_i_data *data = data_ptr;
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+ VM_ASSERT(vm_ccs_p(ccs));
+ VM_ASSERT(id == ccs->cme->called_id);
+
+ if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
+ rb_vm_ccs_free(ccs);
+ return ID_TABLE_DELETE;
+ }
+ else {
+ gc_mark(data->objspace, (VALUE)ccs->cme);
+
+ for (int i=0; i<ccs->len; i++) {
+ VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
+ VM_ASSERT(ccs->cme == vm_cc_cme(ccs->entries[i].cc));
+
+ gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
+ gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
+ }
+ return ID_TABLE_CONTINUE;
+ }
+}
+
+static void
+cc_table_mark(rb_objspace_t *objspace, VALUE klass)
+{
+ struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
+ if (cc_tbl) {
+ struct cc_tbl_i_data data = {
+ .objspace = objspace,
+ .klass = klass,
+ };
+ rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
+ }
+}
+
+static enum rb_id_table_iterator_result
+cc_table_free_i(ID id, VALUE ccs_ptr, void *data_ptr)
+{
+ struct cc_tbl_i_data *data = data_ptr;
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+ VM_ASSERT(vm_ccs_p(ccs));
+ vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
+ return ID_TABLE_CONTINUE;
+}
+
+static void
+cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
+{
+ struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
+
+ if (cc_tbl) {
+ struct cc_tbl_i_data data = {
+ .objspace = objspace,
+ .klass = klass,
+ .alive = alive,
+ };
+ rb_id_table_foreach(cc_tbl, cc_table_free_i, &data);
+ rb_id_table_free(cc_tbl);
+ }
+}
+
+void
+rb_cc_table_free(VALUE klass)
+{
+ cc_table_free(&rb_objspace, klass, TRUE);
+}
+
static inline void
make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
{
@@ -2621,6 +2731,7 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
case T_CLASS:
mjit_remove_class_serial(RCLASS_SERIAL(obj));
rb_id_table_free(RCLASS_M_TBL(obj));
+ cc_table_free(objspace, obj, FALSE);
if (RCLASS_IV_TBL(obj)) {
st_free_table(RCLASS_IV_TBL(obj));
}
@@ -2805,6 +2916,7 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
rb_class_detach_subclasses(obj);
RCLASS_EXT(obj)->subclasses = NULL;
}
+ cc_table_free(objspace, obj, FALSE);
rb_class_remove_from_module_subclasses(obj);
rb_class_remove_from_super_subclasses(obj);
xfree(RANY(obj)->as.klass.ptr);
@@ -2896,6 +3008,9 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
case imemo_callinfo:
RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
break;
+ case imemo_callcache:
+ RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
+ break;
default:
/* unreachable */
break;
@@ -5335,6 +5450,13 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
return;
case imemo_callinfo:
return;
+ case imemo_callcache:
+ {
+ const struct rb_callcache *cc = (const struct rb_callcache *)obj;
+ // should not mark klass here
+ gc_mark(objspace, (VALUE)vm_cc_cme(cc));
+ }
+ return;
#if VM_CHECK_MODE > 0
default:
VM_UNREACHABLE(gc_mark_imemo);
@@ -5383,7 +5505,9 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
gc_mark(objspace, RCLASS_SUPER(obj));
}
if (!RCLASS_EXT(obj)) break;
+
mark_m_tbl(objspace, RCLASS_M_TBL(obj));
+ cc_table_mark(objspace, obj);
mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
break;
@@ -5397,6 +5521,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
}
if (!RCLASS_EXT(obj)) break;
mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
+ cc_table_mark(objspace, obj);
break;
case T_ARRAY:
@@ -8126,6 +8251,13 @@ gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
case imemo_ast:
rb_ast_update_references((rb_ast_t *)obj);
break;
+ case imemo_callcache:
+ {
+ const struct rb_callcache *cc = (const struct rb_callcache *)obj;
+ UPDATE_IF_MOVED(objspace, cc->klass);
+ TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
+ }
+ break;
case imemo_parser_strterm:
case imemo_tmpbuf:
case imemo_callinfo:
@@ -8202,6 +8334,39 @@ update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
}
static enum rb_id_table_iterator_result
+update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
+{
+ rb_objspace_t *objspace = (rb_objspace_t *)data;
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
+ VM_ASSERT(vm_ccs_p(ccs));
+
+ if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
+ ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
+ }
+
+ for (int i=0; i<ccs->len; i++) {
+ if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
+ ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
+ }
+ if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
+ ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
+ }
+ }
+
+ // do not replace
+ return ID_TABLE_CONTINUE;
+}
+
+static void
+update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
+{
+ struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
+ if (tbl) {
+ rb_id_table_foreach_with_replace(tbl, update_cc_tbl_i, NULL, objspace);
+ }
+}
+
+static enum rb_id_table_iterator_result
update_const_table(VALUE value, void *data)
{
rb_const_entry_t *ce = (rb_const_entry_t *)value;
@@ -8257,7 +8422,10 @@ gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
}
if (!RCLASS_EXT(obj)) break;
update_m_tbl(objspace, RCLASS_M_TBL(obj));
+ update_cc_tbl(objspace, obj);
+
gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
+
update_class_ext(objspace, RCLASS_EXT(obj));
update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
break;
@@ -8275,6 +8443,7 @@ gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
}
update_class_ext(objspace, RCLASS_EXT(obj));
update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
+ update_cc_tbl(objspace, obj);
break;
case T_IMEMO:
@@ -8607,7 +8776,6 @@ gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_doubl
gc_check_references_for_moved(objspace);
}
- rb_clear_method_cache_by_class(rb_cObject);
rb_clear_constant_cache();
heap_eden->free_pages = NULL;
heap_eden->using_page = NULL;
@@ -11550,6 +11718,9 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
if (!NIL_P(class_path)) {
APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
}
+ else {
+ APPENDF((BUFF_ARGS, "(anon)"));
+ }
break;
}
case T_ICLASS:
@@ -11606,21 +11777,31 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
IMEMO_NAME(ast);
IMEMO_NAME(parser_strterm);
IMEMO_NAME(callinfo);
+ IMEMO_NAME(callcache);
#undef IMEMO_NAME
default: UNREACHABLE;
}
- APPENDF((BUFF_ARGS, "/%s", imemo_name));
+ APPENDF((BUFF_ARGS, "<%s> ", imemo_name));
switch (imemo_type(obj)) {
case imemo_ment: {
const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
if (me->def) {
- APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
+ APPENDF((BUFF_ARGS, ":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
rb_id2name(me->called_id),
+ METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
+ METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
+ METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
+ METHOD_ENTRY_CACHED(me) ? ",cc" : "",
+ METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
rb_method_type_name(me->def->type),
- me->def->alias_count,
- obj_info(me->owner),
- obj_info(me->defined_class)));
+ me->def->alias_count,
+ (void *)me->owner, // obj_info(me->owner),
+ (void *)me->defined_class)); //obj_info(me->defined_class)));
+
+ if (me->def->type == VM_METHOD_TYPE_ISEQ) {
+ APPENDF((BUFF_ARGS, " (iseq:%p)", (void *)me->def->body.iseq.iseqptr));
+ }
}
else {
APPENDF((BUFF_ARGS, "%s", rb_id2name(me->called_id)));
@@ -11642,6 +11823,17 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
vm_ci_kwarg(ci) ? "available" : "NULL"));
break;
}
+ case imemo_callcache:
+ {
+ const struct rb_callcache *cc = (const struct rb_callcache *)obj;
+ VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
+
+ APPENDF((BUFF_ARGS, "(klass:%s, cme:%s (%p) call:%p",
+ NIL_P(class_path) ? "??" : RSTRING_PTR(class_path),
+ vm_cc_cme(cc) ? rb_id2name(vm_cc_cme(cc)->called_id) : "<NULL>",
+ (void *)vm_cc_cme(cc), (void *)vm_cc_call(cc)));
+ break;
+ }
default:
break;
}
diff --git a/id_table.c b/id_table.c
index f566582479..4f8540246c 100644
--- a/id_table.c
+++ b/id_table.c
@@ -229,7 +229,7 @@ rb_id_table_lookup(struct rb_id_table *tbl, ID id, VALUE *valp)
int index = hash_table_index(tbl, key);
if (index >= 0) {
- *valp = tbl->items[index].val;
+ *valp = tbl->items[index].val;
return TRUE;
}
else {
diff --git a/insns.def b/insns.def
index 2385f33f75..aab5cca065 100644
--- a/insns.def
+++ b/insns.def
@@ -827,7 +827,7 @@ opt_nil_p
(VALUE recv)
(VALUE val)
{
- val = vm_opt_nil_p(cd, recv);
+ val = vm_opt_nil_p(GET_ISEQ(), cd, recv);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
@@ -903,8 +903,9 @@ invokeblock
// attr rb_snum_t sp_inc = sp_inc_of_invokeblock(cd->ci);
// attr rb_snum_t comptime_sp_inc = sp_inc_of_invokeblock(ci);
{
- if (UNLIKELY(cd->cc.call != vm_invokeblock_i)) {
- cd->cc.call = vm_invokeblock_i; // check before setting to avoid CoW
+ if (UNLIKELY(vm_cc_call(cd->cc) != vm_invokeblock_i)) {
+ const struct rb_callcache *cc = vm_cc_new(0, NULL, vm_invokeblock_i);
+ RB_OBJ_WRITE(GET_ISEQ(), &cd->cc, cc);
}
VALUE bh = VM_BLOCK_HANDLER_NONE;
@@ -1167,7 +1168,7 @@ opt_eq
(VALUE recv, VALUE obj)
(VALUE val)
{
- val = opt_eq_func(recv, obj, cd);
+ val = opt_eq_func(GET_ISEQ(), recv, obj, cd);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
@@ -1181,7 +1182,7 @@ opt_neq
(VALUE recv, VALUE obj)
(VALUE val)
{
- val = vm_opt_neq(cd, cd_eq, recv, obj);
+ val = vm_opt_neq(GET_ISEQ(), cd, cd_eq, recv, obj);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
@@ -1431,7 +1432,7 @@ opt_not
(VALUE recv)
(VALUE val)
{
- val = vm_opt_not(cd, recv);
+ val = vm_opt_not(GET_ISEQ(), cd, recv);
if (val == Qundef) {
CALL_SIMPLE_METHOD();
diff --git a/internal/class.h b/internal/class.h
index 72d3b9ea54..67ea6e2a83 100644
--- a/internal/class.h
+++ b/internal/class.h
@@ -41,6 +41,7 @@ struct rb_classext_struct {
#endif
struct rb_id_table *const_tbl;
struct rb_id_table *callable_m_tbl;
+ struct rb_id_table *cc_tbl; /* ID -> [[ci, cc1], cc2, ...] */
struct rb_subclass_entry *subclasses;
struct rb_subclass_entry **parent_subclasses;
/**
@@ -83,6 +84,7 @@ typedef struct rb_classext_struct rb_classext_t;
# define RCLASS_M_TBL(c) (RCLASS(c)->m_tbl)
#endif
#define RCLASS_CALLABLE_M_TBL(c) (RCLASS_EXT(c)->callable_m_tbl)
+#define RCLASS_CC_TBL(c) (RCLASS_EXT(c)->cc_tbl)
#define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl)
#define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin_)
#define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class)
diff --git a/internal/imemo.h b/internal/imemo.h
index 967dc82f01..f09a195e7b 100644
--- a/internal/imemo.h
+++ b/internal/imemo.h
@@ -29,6 +29,7 @@
#define IMEMO_FL_USER2 FL_USER6
#define IMEMO_FL_USER3 FL_USER7
#define IMEMO_FL_USER4 FL_USER8
+#define IMEMO_FL_USER5 FL_USER9
enum imemo_type {
imemo_env = 0,
@@ -43,6 +44,7 @@ enum imemo_type {
imemo_ast = 9,
imemo_parser_strterm = 10,
imemo_callinfo = 11,
+ imemo_callcache = 12,
};
/* CREF (Class REFerence) is defined in method.h */
@@ -171,6 +173,8 @@ imemo_type_p(VALUE imemo, enum imemo_type imemo_type)
}
}
+#define IMEMO_TYPE_P(v, t) imemo_type_p((VALUE)v, t)
+
static inline bool
imemo_throw_data_p(VALUE imemo)
{
diff --git a/internal/vm.h b/internal/vm.h
index 4bd2bfb1e3..26dde33975 100644
--- a/internal/vm.h
+++ b/internal/vm.h
@@ -52,44 +52,6 @@ enum method_missing_reason {
MISSING_NONE = 0x40
};
-struct rb_call_cache {
- /* inline cache: keys */
- rb_serial_t method_state;
- rb_serial_t class_serial[
- (CACHELINE
- - sizeof(rb_serial_t) /* method_state */
- - sizeof(struct rb_callable_method_entry_struct *) /* me */
- - sizeof(uintptr_t) /* method_serial */
- - sizeof(enum method_missing_reason) /* aux */
- - sizeof(VALUE (*)( /* call */
- struct rb_execution_context_struct *e,
- struct rb_control_frame_struct *,
- struct rb_calling_info *,
- const struct rb_call_data *)))
- / sizeof(rb_serial_t)
- ];
-
- /* inline cache: values */
- const struct rb_callable_method_entry_struct *me;
- uintptr_t method_serial; /* me->def->method_serial */
-
- VALUE (*call)(struct rb_execution_context_struct *ec,
- struct rb_control_frame_struct *cfp,
- struct rb_calling_info *calling,
- struct rb_call_data *cd);
-
- union {
- unsigned int index; /* used by ivar */
- enum method_missing_reason method_missing_reason; /* used by method_missing */
- } aux;
-};
-STATIC_ASSERT(cachelined, sizeof(struct rb_call_cache) <= CACHELINE);
-
-struct rb_call_data {
- const struct rb_callinfo *ci;
- struct rb_call_cache cc;
-};
-
/* vm_insnhelper.h */
rb_serial_t rb_next_class_serial(void);
@@ -139,8 +101,9 @@ MJIT_SYMBOL_EXPORT_END
VALUE rb_equal_opt(VALUE obj1, VALUE obj2);
VALUE rb_eql_opt(VALUE obj1, VALUE obj2);
+struct rb_iseq_struct;
MJIT_SYMBOL_EXPORT_BEGIN
-void rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass);
+void rb_vm_search_method_slowpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass);
MJIT_SYMBOL_EXPORT_END
/* vm_dump.c */
diff --git a/iseq.c b/iseq.c
index 867bbc0d63..c6c5c6e127 100644
--- a/iseq.c
+++ b/iseq.c
@@ -247,6 +247,7 @@ rb_iseq_update_references(rb_iseq_t *iseq)
if (!SPECIAL_CONST_P(cds[i].ci)) {
cds[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)cds[i].ci);
}
+ cds[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)cds[i].cc);
}
}
if (FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) {
@@ -323,6 +324,11 @@ rb_iseq_mark(const rb_iseq_t *iseq)
struct rb_call_data *cds = (struct rb_call_data *)body->call_data;
for (unsigned int i=0; i<body->ci_size; i++) {
rb_gc_mark_movable((VALUE)cds[i].ci);
+ const struct rb_callcache *cc = cds[i].cc;
+ if (cc && vm_cc_markable(cds[i].cc)) {
+ rb_gc_mark_movable((VALUE)cc);
+ // TODO: check enable
+ }
}
}
@@ -351,6 +357,14 @@ rb_iseq_mark(const rb_iseq_t *iseq)
}
}
}
+
+ if (body->jit_unit && body->jit_unit->cc_entries != NULL) {
+ // TODO: move to mjit.c?
+ for (unsigned int i=0; i<body->ci_size; i++) {
+ const struct rb_callcache *cc = body->jit_unit->cc_entries[i];
+ rb_gc_mark((VALUE)cc); // pindown
+ }
+ }
}
if (FL_TEST_RAW(iseq, ISEQ_NOT_LOADED_YET)) {
@@ -663,6 +677,9 @@ finish_iseq_build(rb_iseq_t *iseq)
rb_exc_raise(err);
}
+ RB_DEBUG_COUNTER_INC(iseq_num);
+ RB_DEBUG_COUNTER_ADD(iseq_cd_num, iseq->body->ci_size);
+
rb_iseq_init_trace(iseq);
return Qtrue;
}
diff --git a/method.h b/method.h
index 519cc9bfc1..01a25d368c 100644
--- a/method.h
+++ b/method.h
@@ -69,8 +69,12 @@ typedef struct rb_callable_method_entry_struct { /* same fields with rb_method_e
#define METHOD_ENTRY_VISI(me) (rb_method_visibility_t)(((me)->flags & (IMEMO_FL_USER0 | IMEMO_FL_USER1)) >> (IMEMO_FL_USHIFT+0))
#define METHOD_ENTRY_BASIC(me) (int) (((me)->flags & (IMEMO_FL_USER2 )) >> (IMEMO_FL_USHIFT+2))
-#define METHOD_ENTRY_COMPLEMENTED(me) ((me)->flags & IMEMO_FL_USER3)
-#define METHOD_ENTRY_COMPLEMENTED_SET(me) ((me)->flags = (me)->flags | IMEMO_FL_USER3)
+#define METHOD_ENTRY_COMPLEMENTED(me) ((me)->flags & IMEMO_FL_USER3)
+#define METHOD_ENTRY_COMPLEMENTED_SET(me) ((me)->flags = (me)->flags | IMEMO_FL_USER3)
+#define METHOD_ENTRY_CACHED(me) ((me)->flags & IMEMO_FL_USER4)
+#define METHOD_ENTRY_CACHED_SET(me) ((me)->flags = (me)->flags | IMEMO_FL_USER4)
+#define METHOD_ENTRY_INVALIDATED(me) ((me)->flags & IMEMO_FL_USER5)
+#define METHOD_ENTRY_INVALIDATED_SET(me) ((me)->flags = (me)->flags | IMEMO_FL_USER5)
static inline void
METHOD_ENTRY_VISI_SET(rb_method_entry_t *me, rb_method_visibility_t visi)
@@ -229,4 +233,7 @@ void rb_scope_visibility_set(rb_method_visibility_t);
VALUE rb_unnamed_parameters(int arity);
+void rb_clear_method_cache(VALUE klass_or_module, ID mid);
+void rb_clear_method_cache_all(void);
+
#endif /* RUBY_METHOD_H */
diff --git a/mjit.c b/mjit.c
index cdce21c4ac..d3cb063ff9 100644
--- a/mjit.c
+++ b/mjit.c
@@ -25,6 +25,9 @@
#include "internal/warnings.h"
#include "mjit_worker.c"
+#include "vm_callinfo.h"
+
+static void create_unit(const rb_iseq_t *iseq);
// Copy ISeq's states so that race condition does not happen on compilation.
static void
@@ -51,14 +54,18 @@ mjit_copy_job_handler(void *data)
}
const struct rb_iseq_constant_body *body = job->iseq->body;
- if (job->cc_entries) {
- unsigned int i;
- struct rb_call_cache *sink = job->cc_entries;
- const struct rb_call_data *calls = body->call_data;
- for (i = 0; i < body->ci_size; i++) {
- *sink++ = calls[i].cc;
+ unsigned int ci_size = body->ci_size;
+ if (ci_size > 0) {
+ const struct rb_callcache **cc_entries = ALLOC_N(const struct rb_callcache *, ci_size);
+ if (body->jit_unit == NULL) {
+ create_unit(job->iseq);
+ }
+ body->jit_unit->cc_entries = cc_entries;
+ for (unsigned int i=0; i<ci_size; i++) {
+ cc_entries[i] = body->call_data[i].cc;
}
}
+
if (job->is_entries) {
memcpy(job->is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * body->is_size);
}
diff --git a/mjit.h b/mjit.h
index bdc186f788..15be560786 100644
--- a/mjit.h
+++ b/mjit.h
@@ -70,6 +70,35 @@ struct rb_mjit_compile_info {
bool disable_inlining;
};
+// The unit structure that holds metadata of ISeq for MJIT.
+struct rb_mjit_unit {
+ // Unique order number of unit.
+ int id;
+ // Dlopen handle of the loaded object file.
+ void *handle;
+ rb_iseq_t *iseq;
+#ifndef _MSC_VER
+ // This value is always set for `compact_all_jit_code`. Also used for lazy deletion.
+ char *o_file;
+ // true if it's inherited from parent Ruby process and lazy deletion should be skipped.
+ // `o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
+ // by child for `compact_all_jit_code`.
+ bool o_file_inherited_p;
+#endif
+#if defined(_WIN32)
+ // DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted.
+ char *so_file;
+#endif
+ // Only used by unload_units. Flag to check this unit is currently on stack or not.
+ char used_code_p;
+ struct list_node unode;
+ // mjit_compile's optimization switches
+ struct rb_mjit_compile_info compile_info;
+
+ // captured CC values; they should be marked together with the iseq.
+ const struct rb_callcache **cc_entries; // size: iseq->body->ci_size
+};
+
typedef VALUE (*mjit_func_t)(rb_execution_context_t *, rb_control_frame_t *);
RUBY_SYMBOL_EXPORT_BEGIN
diff --git a/mjit_compile.c b/mjit_compile.c
index 57037af20c..e4f7cf292a 100644
--- a/mjit_compile.c
+++ b/mjit_compile.c
@@ -41,9 +41,9 @@ call_data_index(CALL_DATA cd, const struct rb_iseq_constant_body *body)
// For propagating information needed for lazily pushing a frame.
struct inlined_call_context {
int orig_argc; // ci->orig_argc
- VALUE me; // cc->me
- int param_size; // def_iseq_ptr(cc->me->def)->body->param.size
- int local_size; // def_iseq_ptr(cc->me->def)->body->local_table_size
+ VALUE me; // vm_cc_cme(cc)
+ int param_size; // def_iseq_ptr(vm_cc_cme(cc)->def)->body->param.size
+ int local_size; // def_iseq_ptr(vm_cc_cme(cc)->def)->body->local_table_size
};
// Storage to keep compiler's status. This should have information
@@ -57,7 +57,6 @@ struct compile_status {
bool local_stack_p;
// Safely-accessible cache entries copied from main thread.
union iseq_inline_storage_entry *is_entries;
- struct rb_call_cache *cc_entries;
// Mutated optimization levels
struct rb_mjit_compile_info *compile_info;
// If `inlined_iseqs[pos]` is not NULL, `mjit_compile_body` tries to inline ISeq there.
@@ -79,13 +78,11 @@ struct case_dispatch_var {
VALUE last_value;
};
-// Returns true if call cache is still not obsoleted and cc->me->def->type is available.
+// Returns true if call cache is still not obsoleted and vm_cc_cme(cc)->def->type is available.
static bool
has_valid_method_type(CALL_CACHE cc)
{
- extern bool mjit_valid_class_serial_p(rb_serial_t class_serial);
- return GET_GLOBAL_METHOD_STATE() == cc->method_state
- && mjit_valid_class_serial_p(cc->class_serial[0]) && cc->me;
+ return vm_cc_cme(cc) != NULL;
}
// Returns true if iseq can use fastpath for setup, otherwise NULL. This becomes true in the same condition
@@ -276,7 +273,8 @@ compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct
fprintf(f, " return Qundef;\n");
}
-extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries);
+extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq,
+ union iseq_inline_storage_entry *is_entries);
static bool
mjit_compile_body(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
@@ -368,8 +366,6 @@ inlinable_iseq_p(const struct rb_iseq_constant_body *body)
.stack_size_for_pos = (int *)alloca(sizeof(int) * body->iseq_size), \
.inlined_iseqs = compile_root_p ? \
alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \
- .cc_entries = body->ci_size > 0 ? \
- alloca(sizeof(struct rb_call_cache) * body->ci_size) : NULL, \
.is_entries = (body->is_size > 0) ? \
alloca(sizeof(union iseq_inline_storage_entry) * body->is_size) : NULL, \
.compile_info = compile_root_p ? \
@@ -394,17 +390,18 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
#else
int insn = (int)body->iseq_encoded[pos];
#endif
-
if (insn == BIN(opt_send_without_block)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block`
CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1];
const struct rb_callinfo *ci = cd->ci;
- CALL_CACHE cc_copy = status->cc_entries + call_data_index(cd, body); // use copy to avoid race condition
+ const struct rb_callcache *cc = iseq->body->jit_unit->cc_entries[call_data_index(cd, body)]; // use copy to avoid race condition
const rb_iseq_t *child_iseq;
- if (has_valid_method_type(cc_copy) &&
- !(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path
- cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && fastpath_applied_iseq_p(ci, cc_copy, child_iseq = def_iseq_ptr(cc_copy->me->def)) && // CC_SET_FASTPATH in vm_callee_setup_arg
- inlinable_iseq_p(child_iseq->body)) {
+ if (has_valid_method_type(cc) &&
+ !(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path
+ vm_cc_cme(cc)->def->type == VM_METHOD_TYPE_ISEQ &&
+ fastpath_applied_iseq_p(ci, cc, child_iseq = def_iseq_ptr(vm_cc_cme(cc)->def)) &&
+ // CC_SET_FASTPATH in vm_callee_setup_arg
+ inlinable_iseq_p(child_iseq->body)) {
status->inlined_iseqs[pos] = child_iseq->body;
if (mjit_opts.verbose >= 1) // print beforehand because ISeq may be GCed during copy job.
@@ -418,12 +415,12 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
INIT_COMPILE_STATUS(child_status, child_iseq->body, false);
child_status.inline_context = (struct inlined_call_context){
.orig_argc = vm_ci_argc(ci),
- .me = (VALUE)cc_copy->me,
+ .me = (VALUE)vm_cc_cme(cc),
.param_size = child_iseq->body->param.size,
.local_size = child_iseq->body->local_table_size
};
- if ((child_status.cc_entries != NULL || child_status.is_entries != NULL)
- && !mjit_copy_cache_from_main_thread(child_iseq, child_status.cc_entries, child_status.is_entries))
+ if ((child_iseq->body->ci_size > 0 || child_status.is_entries != NULL)
+ && !mjit_copy_cache_from_main_thread(child_iseq, child_status.is_entries))
return false;
fprintf(f, "ALWAYS_INLINE(static VALUE _mjit_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq));\n", pos);
@@ -454,9 +451,10 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname)
struct compile_status status;
INIT_COMPILE_STATUS(status, iseq->body, true);
- if ((status.cc_entries != NULL || status.is_entries != NULL)
- && !mjit_copy_cache_from_main_thread(iseq, status.cc_entries, status.is_entries))
+ if ((iseq->body->ci_size > 0 || status.is_entries != NULL)
+ && !mjit_copy_cache_from_main_thread(iseq, status.is_entries)) {
return false;
+ }
if (!status.compile_info->disable_send_cache && !status.compile_info->disable_inlining) {
if (!precompile_inlinable_iseqs(f, iseq, &status))
diff --git a/mjit_worker.c b/mjit_worker.c
index ce8133ac7d..85411847d7 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -122,32 +122,6 @@ typedef intptr_t pid_t;
#define MJIT_TMP_PREFIX "_ruby_mjit_"
-// The unit structure that holds metadata of ISeq for MJIT.
-struct rb_mjit_unit {
- // Unique order number of unit.
- int id;
- // Dlopen handle of the loaded object file.
- void *handle;
- rb_iseq_t *iseq;
-#ifndef _MSC_VER
- // This value is always set for `compact_all_jit_code`. Also used for lazy deletion.
- char *o_file;
- // true if it's inherited from parent Ruby process and lazy deletion should be skipped.
- // `o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
- // by child for `compact_all_jit_code`.
- bool o_file_inherited_p;
-#endif
-#if defined(_WIN32)
- // DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted.
- char *so_file;
-#endif
- // Only used by unload_units. Flag to check this unit is currently on stack or not.
- char used_code_p;
- struct list_node unode;
- // mjit_compile's optimization switches
- struct rb_mjit_compile_info compile_info;
-};
-
// Linked list of struct rb_mjit_unit.
struct rb_mjit_unit_list {
struct list_head head;
@@ -1117,7 +1091,6 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
typedef struct {
const rb_iseq_t *iseq;
- struct rb_call_cache *cc_entries;
union iseq_inline_storage_entry *is_entries;
bool finish_p;
} mjit_copy_job_t;
@@ -1138,7 +1111,7 @@ int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *);
// We're lazily copying cache values from main thread because these cache values
// could be different between ones on enqueue timing and ones on dequeue timing.
bool
-mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries)
+mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, union iseq_inline_storage_entry *is_entries)
{
mjit_copy_job_t *job = &mjit_copy_job; // just a short hand
@@ -1146,7 +1119,6 @@ mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc
job->finish_p = true; // disable dispatching this job in mjit_copy_job_handler while it's being modified
CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread");
- job->cc_entries = cc_entries;
job->is_entries = is_entries;
CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread");
diff --git a/test/-ext-/tracepoint/test_tracepoint.rb b/test/-ext-/tracepoint/test_tracepoint.rb
index 1fc1657f5b..4f480bb856 100644
--- a/test/-ext-/tracepoint/test_tracepoint.rb
+++ b/test/-ext-/tracepoint/test_tracepoint.rb
@@ -10,33 +10,25 @@ class TestTracepointObj < Test::Unit::TestCase
end
def test_tracks_objspace_events
- result = Bug.tracepoint_track_objspace_events{
- Object.new
- }
- object_new_newobj = result[0]
-
result = EnvUtil.suppress_warning {eval(<<-EOS, nil, __FILE__, __LINE__+1)}
Bug.tracepoint_track_objspace_events {
99
'abc'
_="foobar"
- Object.new
nil
}
EOS
newobj_count, free_count, gc_start_count, gc_end_mark_count, gc_end_sweep_count, *newobjs = *result
- assert_equal 1 + object_new_newobj, newobj_count
- assert_equal 1 + object_new_newobj, newobjs.size
+ assert_equal 1, newobj_count
+ assert_equal 1, newobjs.size
assert_equal 'foobar', newobjs[0]
- assert_equal Object, newobjs[1].class
assert_operator free_count, :>=, 0
assert_operator gc_start_count, :==, gc_end_mark_count
assert_operator gc_start_count, :>=, gc_end_sweep_count
end
def test_tracks_objspace_count
- return
stat1 = {}
stat2 = {}
GC.disable
diff --git a/test/ruby/test_gc.rb b/test/ruby/test_gc.rb
index ef99f69f50..9442041ee5 100644
--- a/test/ruby/test_gc.rb
+++ b/test/ruby/test_gc.rb
@@ -94,6 +94,9 @@ class TestGc < Test::Unit::TestCase
GC.start
GC.stat(stat)
ObjectSpace.count_objects(count)
+ # repeat the same method invocations so that cache objects are created beforehand.
+ GC.stat(stat)
+ ObjectSpace.count_objects(count)
assert_equal(count[:TOTAL]-count[:FREE], stat[:heap_live_slots])
assert_equal(count[:FREE], stat[:heap_free_slots])
diff --git a/test/ruby/test_inlinecache.rb b/test/ruby/test_inlinecache.rb
new file mode 100644
index 0000000000..90d0189d4c
--- /dev/null
+++ b/test/ruby/test_inlinecache.rb
@@ -0,0 +1,64 @@
+# -*- coding: us-ascii -*-
+# frozen_string_literal: true
+
+require 'test/unit'
+
+class TestMethod < Test::Unit::TestCase
+ def test_alias
+ m0 = Module.new do
+ def foo; :M0 end
+ end
+ m1 = Module.new do
+ include m0
+ end
+ c = Class.new do
+ include m1
+ alias bar foo
+ end
+ d = Class.new(c) do
+ end
+
+ test = -> do
+ d.new.bar
+ end
+
+ assert_equal :M0, test[]
+
+ c.class_eval do
+ def bar
+ :C
+ end
+ end
+
+ assert_equal :C, test[]
+ end
+
+ def test_zsuper
+ assert_separately [], <<-EOS
+ class C
+ private def foo
+ :C
+ end
+ end
+
+ class D < C
+ public :foo
+ end
+
+ class E < D; end
+ class F < E; end
+
+ test = -> do
+ F.new().foo
+ end
+
+ assert_equal :C, test[]
+
+ class E
+ def foo; :E; end
+ end
+
+ assert_equal :E, test[]
+ EOS
+ end
+end
diff --git a/tool/mk_call_iseq_optimized.rb b/tool/mk_call_iseq_optimized.rb
index 9d4caf3465..448d44039f 100644
--- a/tool/mk_call_iseq_optimized.rb
+++ b/tool/mk_call_iseq_optimized.rb
@@ -24,7 +24,7 @@ static VALUE
#{fname(param, local)}(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
- return vm_call_iseq_setup_normal(ec, cfp, calling, cd->cc.me, 0, #{param}, #{local});
+ return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cd->cc), 0, #{param}, #{local});
}
EOS
diff --git a/tool/ruby_vm/views/_mjit_compile_send.erb b/tool/ruby_vm/views/_mjit_compile_send.erb
index af39512f73..6c5a362699 100644
--- a/tool/ruby_vm/views/_mjit_compile_send.erb
+++ b/tool/ruby_vm/views/_mjit_compile_send.erb
@@ -14,9 +14,9 @@
MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
% end
% # compiler: Use copied cc to avoid race condition
- CALL_CACHE cc_copy = status->cc_entries + call_data_index(cd, body);
+ const struct rb_callcache *captured_cc = body->jit_unit->cc_entries[call_data_index(cd, body)];
%
- if (!status->compile_info->disable_send_cache && has_valid_method_type(cc_copy)) {
+ if (!status->compile_info->disable_send_cache && has_valid_method_type(captured_cc)) {
const rb_iseq_t *iseq;
const CALL_INFO ci = cd->ci;
unsigned int argc = vm_ci_argc(ci); // this `argc` variable is for calculating a value's position on stack considering `blockarg`.
@@ -25,7 +25,10 @@
% end
if (!(vm_ci_flag(ci) & VM_CALL_TAILCALL) // inlining non-tailcall path
- && cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && fastpath_applied_iseq_p(ci, cc_copy, iseq = def_iseq_ptr(cc_copy->me->def))) { // CC_SET_FASTPATH in vm_callee_setup_arg
+ && vm_cc_cme(captured_cc)->def->type == VM_METHOD_TYPE_ISEQ
+ && fastpath_applied_iseq_p(ci, captured_cc, iseq = def_iseq_ptr(vm_cc_cme(captured_cc)->def))) {
+ // CC_SET_FASTPATH in vm_callee_setup_arg
+
int param_size = iseq->body->param.size;
fprintf(f, "{\n");
@@ -35,8 +38,10 @@
}
% # JIT: Invalidate call cache if it requires vm_search_method. This allows to inline some of following things.
- fprintf(f, " if (UNLIKELY(GET_GLOBAL_METHOD_STATE() != %"PRI_SERIALT_PREFIX"u ||\n", cc_copy->method_state);
- fprintf(f, " RCLASS_SERIAL(CLASS_OF(stack[%d])) != %"PRI_SERIALT_PREFIX"u)) {\n", b->stack_size - 1 - argc, cc_copy->class_serial[0]);
+ fprintf(f, " const struct rb_call_data *cd = (const struct rb_callcache *)0x%"PRIxVALUE";\n", (VALUE)cd);
+ fprintf(f, " const struct rb_callcache *cc = (const struct rb_callcache *)0x%"PRIxVALUE";\n", (VALUE)captured_cc);
+ fprintf(f, " if (UNLIKELY(cd->cc != cc || !vm_cc_valid_p(cc, CLASS_OF(stack[%d])))) {\n", b->stack_size - 1 - argc);
+ // TODO: need to free cc
fprintf(f, " reg_cfp->pc = original_body_iseq + %d;\n", pos);
fprintf(f, " reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size);
fprintf(f, " goto send_cancel;\n");
@@ -59,18 +64,18 @@
fprintf(f, " {\n");
fprintf(f, " struct rb_calling_info calling;\n");
% if insn.name == 'send'
- fprintf(f, " calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, (CALL_INFO)0x%"PRIxVALUE", (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", (VALUE)ci, (VALUE)blockiseq);
+ fprintf(f, " calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, cd->ci, (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", (VALUE)blockiseq);
% else
fprintf(f, " calling.block_handler = VM_BLOCK_HANDLER_NONE;\n");
% end
fprintf(f, " calling.argc = %d;\n", vm_ci_argc(ci));
fprintf(f, " calling.recv = stack[%d];\n", b->stack_size - 1 - argc);
-% # JIT: Special CALL_METHOD. Bypass cc_copy->call and inline vm_call_iseq_setup_normal for vm_call_iseq_setup_func FASTPATH.
+% # JIT: Special CALL_METHOD. Bypass captured_cc->call and inline vm_call_iseq_setup_normal for vm_call_iseq_setup_func FASTPATH.
fprintf(f, " {\n");
fprintf(f, " VALUE v;\n");
- fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x%"PRIxVALUE", 0, %d, %d);\n",
- (VALUE)cc_copy->me, param_size, iseq->body->local_table_size); // fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE
+ fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, vm_cc_cme(cc), 0, %d, %d);\n",
+ param_size, iseq->body->local_table_size); // fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE
if (iseq->body->catch_except_p) {
fprintf(f, " VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n");
fprintf(f, " v = vm_exec(ec, TRUE);\n");
diff --git a/tool/ruby_vm/views/mjit_compile.inc.erb b/tool/ruby_vm/views/mjit_compile.inc.erb
index 95e71183d9..6ab57ae164 100644
--- a/tool/ruby_vm/views/mjit_compile.inc.erb
+++ b/tool/ruby_vm/views/mjit_compile.inc.erb
@@ -57,7 +57,7 @@ switch (insn) {
% when *send_compatible_opt_insns
% # To avoid cancel, just emit `opt_send_without_block` instead of `opt_*` insn if call cache is populated.
% cd_index = insn.opes.index { |o| o.fetch(:type) == 'CALL_DATA' }
- if (has_valid_method_type(status->cc_entries + call_data_index((CALL_DATA)operands[<%= cd_index %>], body))) {
+ if (has_valid_method_type(body->jit_unit->cc_entries[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)])) {
<%= render 'mjit_compile_send', locals: { insn: opt_send_without_block } -%>
<%= render 'mjit_compile_insn', locals: { insn: opt_send_without_block } -%>
break;
diff --git a/vm.c b/vm.c
index 7482db1b54..50e317408b 100644
--- a/vm.c
+++ b/vm.c
@@ -386,6 +386,8 @@ rb_serial_t ruby_vm_global_method_state = 1;
rb_serial_t ruby_vm_global_constant_state = 1;
rb_serial_t ruby_vm_class_serial = 1;
+const struct rb_callcache *vm_empty_cc;
+
static void thread_free(void *ptr);
void
@@ -2806,8 +2808,9 @@ static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
REWIND_CFP({
- rb_undef(cbase, SYM2ID(sym));
- rb_clear_method_cache_by_class(self);
+ ID mid = SYM2ID(sym);
+ rb_undef(cbase, mid);
+ rb_clear_method_cache(self, mid);
});
return Qnil;
}
@@ -2962,6 +2965,13 @@ f_lambda(VALUE _)
return rb_block_lambda();
}
+static VALUE
+vm_mtbl(VALUE self, VALUE obj, VALUE sym)
+{
+ vm_mtbl_dump(CLASS_OF(obj), SYM2ID(sym));
+ return Qnil;
+}
+
void
Init_VM(void)
{
@@ -3249,9 +3259,11 @@ Init_VM(void)
#if VMDEBUG
rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
+ rb_define_singleton_method(rb_cRubyVM, "mtbl", vm_mtbl, 2);
#else
(void)sdr;
(void)nsdr;
+ (void)vm_mtbl;
#endif
/* VM bootstrap: phase 2 */
@@ -3348,6 +3360,10 @@ Init_vm_objects(void)
vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);
rb_objspace_gc_enable(vm->objspace);
+
+ vm_empty_cc = vm_cc_new(0, NULL, vm_call_general);
+ FL_SET_RAW(vm_empty_cc, VM_CALLCACHE_UNMARKABLE);
+ rb_gc_register_mark_object((VALUE)vm_empty_cc);
}
/* top self */
@@ -3716,6 +3732,12 @@ vm_collect_usage_register(int reg, int isset)
}
#endif
+MJIT_FUNC_EXPORTED const struct rb_callcache *
+rb_vm_empty_cc(void)
+{
+ return vm_empty_cc;
+}
+
#endif /* #ifndef MJIT_HEADER */
#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */
diff --git a/vm_callinfo.h b/vm_callinfo.h
index 33d4f614da..32b0131fa1 100644
--- a/vm_callinfo.h
+++ b/vm_callinfo.h
@@ -75,13 +75,13 @@ struct rb_callinfo {
#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
-#define CI_EMBED_ARGC_MASK ((1UL<<CI_EMBED_ARGC_bits) - 1)
+#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
-#define CI_EMBED_FLAG_MASK ((1UL<<CI_EMBED_FLAG_bits) - 1)
+#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
-#define CI_EMBED_ID_MASK ((1UL<<CI_EMBED_ID_bits) - 1)
+#define CI_EMBED_ID_MASK ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)
-static inline int
+static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
#if USE_EMBED_CI
@@ -89,7 +89,7 @@ vm_ci_packed_p(const struct rb_callinfo *ci)
return 1;
}
else {
- VM_ASSERT(imemo_type_p((VALUE)ci, imemo_callinfo));
+ VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
return 0;
}
#else
@@ -97,6 +97,17 @@ vm_ci_packed_p(const struct rb_callinfo *ci)
#endif
}
+static inline bool
+vm_ci_p(const struct rb_callinfo *ci)
+{
+ if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
@@ -141,7 +152,6 @@ vm_ci_kwarg(const struct rb_callinfo *ci)
}
}
-#if 0 // for debug
static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
@@ -153,7 +163,6 @@ vm_ci_dump(const struct rb_callinfo *ci)
rp(ci);
}
}
-#endif
#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)
@@ -162,12 +171,11 @@ static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
#if USE_EMBED_CI
-
if ((mid & ~CI_EMBED_ID_MASK) == 0 &&
(argc & ~CI_EMBED_ARGC_MASK) == 0 &&
kwarg == NULL) {
VALUE embed_ci =
- 1L |
+ 1L |
((VALUE)argc << CI_EMBED_ARGC_SHFT) |
((VALUE)flag << CI_EMBED_FLAG_SHFT) |
((VALUE)mid << CI_EMBED_ID_SHFT);
@@ -175,8 +183,11 @@ vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinf
return (const struct rb_callinfo *)embed_ci;
}
#endif
+
const bool debug = 0;
if (debug) fprintf(stderr, "%s:%d ", file, line);
+
+ // TODO: dedup
const struct rb_callinfo *ci = (const struct rb_callinfo *)
rb_imemo_new(imemo_callinfo,
(VALUE)mid,
@@ -204,3 +215,209 @@ vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb
RB_DEBUG_COUNTER_INC(ci_runtime);
return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}
+
+typedef VALUE (*vm_call_handler)(
+ struct rb_execution_context_struct *ec,
+ struct rb_control_frame_struct *cfp,
+ struct rb_calling_info *calling,
+ struct rb_call_data *cd);
+
+// imemo_callcache
+
+struct rb_callcache {
+ const VALUE flags;
+
+ /* inline cache: key */
+ const VALUE klass; // should not be marked, because marking it would prevent
+ // klass from being free'd. When klass is collected,
+ // cc will be cleared (cc->klass = 0) at vm_ccs_free().
+
+ /* inline cache: values */
+ const struct rb_callable_method_entry_struct * const cme_;
+ const vm_call_handler call_;
+
+ union {
+ const unsigned int attr_index;
+ const enum method_missing_reason method_missing_reason; /* used by method_missing */
+ } aux_;
+};
+
+#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER0
+
+static inline const struct rb_callcache *
+vm_cc_new(VALUE klass,
+ const struct rb_callable_method_entry_struct *cme,
+ vm_call_handler call)
+{
+ const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);
+ RB_DEBUG_COUNTER_INC(cc_new);
+ return cc;
+}
+
+static inline const struct rb_callcache *
+vm_cc_fill(struct rb_callcache *cc,
+ VALUE klass,
+ const struct rb_callable_method_entry_struct *cme,
+ vm_call_handler call)
+{
+ struct rb_callcache cc_body = {
+ .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
+ .klass = klass,
+ .cme_ = cme,
+ .call_ = call,
+ };
+ MEMCPY(cc, &cc_body, struct rb_callcache, 1);
+ return cc;
+}
+
+static inline bool
+vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc->klass == 0 ||
+ RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
+ return cc->klass == klass;
+}
+
+static inline const struct rb_callable_method_entry_struct *
+vm_cc_cme(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return cc->cme_;
+}
+
+static inline vm_call_handler
+vm_cc_call(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return cc->call_;
+}
+
+static inline unsigned int
+vm_cc_attr_index(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return cc->aux_.attr_index;
+}
+
+static inline unsigned int
+vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return cc->aux_.method_missing_reason;
+}
+
+static inline int
+vm_cc_markable(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return FL_TEST_RAW(cc, VM_CALLCACHE_UNMARKABLE) == 0;
+}
+
+static inline bool
+vm_cc_valid_p(const struct rb_callcache *cc, VALUE klass)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ if (cc->klass == klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+#ifndef MJIT_HEADER
+extern const struct rb_callcache *vm_empty_cc;
+#else
+extern const struct rb_callcache *rb_vm_empty_cc(void);
+#endif
+
+static inline const struct rb_callcache *
+vm_cc_empty(void)
+{
+#ifndef MJIT_HEADER
+ return vm_empty_cc;
+#else
+ return rb_vm_empty_cc();
+#endif
+}
+
+/* callcache: mutate */
+
+static inline void
+vm_cc_cme_set(const struct rb_callcache *cc, const struct rb_callable_method_entry_struct *cme)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ VM_ASSERT(vm_cc_cme(cc) != NULL);
+ VM_ASSERT(vm_cc_cme(cc)->called_id == cme->called_id);
+ VM_ASSERT(!vm_cc_markable(cc)); // only used for vm_eval.c
+
+ *((const struct rb_callable_method_entry_struct **)&cc->cme_) = cme;
+}
+
+static inline void
+vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ *(vm_call_handler *)&cc->call_ = call;
+}
+
+static inline void
+vm_cc_attr_index_set(const struct rb_callcache *cc, int index)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ *(int *)&cc->aux_.attr_index = index;
+}
+
+static inline void
+vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
+}
+
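+// Invalidate a call cache by clearing its class key; vm_cc_class_check() and
+// vm_cc_valid_p() will always fail for this cc afterwards.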
+static inline void
+vm_cc_invalidate(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+    VM_ASSERT(cc->klass != 0); // should still be enabled (not invalidated yet)
+
+ *(VALUE *)&cc->klass = 0;
+ RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
+}
+
+/* calldata */
+
+struct rb_call_data {
+ const struct rb_callinfo *ci;
+ const struct rb_callcache *cc;
+};
+
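+// All call caches registered for one method id in a class's cc table
+// (RCLASS_CC_TBL): a growable array of (call-info, call-cache) pairs that
+// share a single callable method entry.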
+struct rb_class_cc_entries {
+#if VM_CHECK_MODE > 0
+ VALUE debug_sig;
+#endif
+ int capa;
+ int len;
+ const struct rb_callable_method_entry_struct *cme;
+ struct rb_class_cc_entries_entry {
+ const struct rb_callinfo *ci;
+ const struct rb_callcache *cc;
+ } *entries;
+};
+
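+// debug_sig is initialized to ~(VALUE)ccs in vm_ccs_create(), so a mismatch
+// in vm_ccs_p() indicates a stale or corrupted ccs pointer.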
+#if VM_CHECK_MODE > 0
+static inline bool
+vm_ccs_p(const struct rb_class_cc_entries *ccs)
+{
+ return ccs->debug_sig == ~(VALUE)ccs;
+}
+#endif
+
+// gc.c
+void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);
diff --git a/vm_core.h b/vm_core.h
index fb63c1c119..d84e05d99b 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -253,7 +253,6 @@ struct rb_calling_info {
};
struct rb_execution_context_struct;
-typedef VALUE (*vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, struct rb_call_data *cd);
#if 1
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
@@ -1088,7 +1087,7 @@ typedef struct iseq_inline_cache_entry *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
-typedef struct rb_call_cache *CALL_CACHE;
+typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;
typedef VALUE CDHASH;
diff --git a/vm_dump.c b/vm_dump.c
index 64a210543c..5bcbac1746 100644
--- a/vm_dump.c
+++ b/vm_dump.c
@@ -111,7 +111,7 @@ control_frame_dump(const rb_execution_context_t *ec, const rb_control_frame_t *c
}
if (cfp->iseq != 0) {
-#define RUBY_VM_IFUNC_P(ptr) imemo_type_p((VALUE)ptr, imemo_ifunc)
+#define RUBY_VM_IFUNC_P(ptr) IMEMO_TYPE_P(ptr, imemo_ifunc)
if (RUBY_VM_IFUNC_P(cfp->iseq)) {
iseq_name = "<ifunc>";
}
@@ -167,7 +167,7 @@ control_frame_dump(const rb_execution_context_t *ec, const rb_control_frame_t *c
char buff[0x100];
if (me) {
- if (imemo_type_p((VALUE)me, imemo_ment)) {
+ if (IMEMO_TYPE_P(me, imemo_ment)) {
fprintf(stderr, " me:\n");
fprintf(stderr, " called_id: %s, type: %s\n", rb_id2name(me->called_id), rb_method_type_name(me->def->type));
fprintf(stderr, " owner class: %s\n", rb_raw_obj_info(buff, 0x100, me->owner));
diff --git a/vm_eval.c b/vm_eval.c
index 0f51d9a841..49cc92780b 100644
--- a/vm_eval.c
+++ b/vm_eval.c
@@ -47,7 +47,8 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
{
struct rb_calling_info calling = { Qundef, recv, argc, kw_splat, };
const struct rb_callinfo *ci = vm_ci_new_runtime(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL);
- const struct rb_call_cache cc = { 0, { 0, }, me, me->def->method_serial, vm_call_general, { 0, }, };
+ struct rb_callcache cc_body;
+ const struct rb_callcache *cc = vm_cc_fill(&cc_body, 0, me, vm_call_general);
struct rb_call_data cd = { ci, cc, };
return vm_call0_body(ec, &calling, &cd, argv);
}
@@ -56,9 +57,9 @@ static VALUE
vm_call0_cfunc_with_frame(rb_execution_context_t* ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv)
{
const struct rb_callinfo *ci = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
VALUE val;
- const rb_callable_method_entry_t *me = cc->me;
+ const rb_callable_method_entry_t *me = vm_cc_cme(cc);
const rb_method_cfunc_t *cfunc = UNALIGNED_MEMBER_PTR(me->def, body.cfunc);
int len = cfunc->argc;
VALUE recv = calling->recv;
@@ -109,14 +110,14 @@ static VALUE
vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv)
{
const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
VALUE ret;
calling->block_handler = vm_passed_block_handler(ec);
again:
- switch (cc->me->def->type) {
+ switch (vm_cc_cme(cc)->def->type) {
case VM_METHOD_TYPE_ISEQ:
{
rb_control_frame_t *reg_cfp = ec->cfp;
@@ -147,7 +148,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
}
rb_check_arity(calling->argc, 1, 1);
- ret = rb_ivar_set(calling->recv, cc->me->def->body.attr.id, argv[0]);
+ ret = rb_ivar_set(calling->recv, vm_cc_cme(cc)->def->body.attr.id, argv[0]);
goto success;
case VM_METHOD_TYPE_IVAR:
if (calling->kw_splat &&
@@ -158,7 +159,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
}
rb_check_arity(calling->argc, 0, 0);
- ret = rb_attr_get(calling->recv, cc->me->def->body.attr.id);
+ ret = rb_attr_get(calling->recv, vm_cc_cme(cc)->def->body.attr.id);
goto success;
case VM_METHOD_TYPE_BMETHOD:
ret = vm_call_bmethod_body(ec, calling, cd, argv);
@@ -166,21 +167,21 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
case VM_METHOD_TYPE_ZSUPER:
case VM_METHOD_TYPE_REFINED:
{
- const rb_method_type_t type = cc->me->def->type;
- VALUE super_class = cc->me->defined_class;
+ const rb_method_type_t type = vm_cc_cme(cc)->def->type;
+ VALUE super_class = vm_cc_cme(cc)->defined_class;
if (type == VM_METHOD_TYPE_ZSUPER) {
super_class = RCLASS_ORIGIN(super_class);
}
- else if (cc->me->def->body.refined.orig_me) {
- CC_SET_ME(cc, refined_method_callable_without_refinement(cc->me));
- goto again;
+ else if (vm_cc_cme(cc)->def->body.refined.orig_me) {
+ vm_cc_cme_set(cc, refined_method_callable_without_refinement(vm_cc_cme(cc)));
+ goto again;
}
super_class = RCLASS_SUPER(super_class);
if (super_class) {
- CC_SET_ME(cc, rb_callable_method_entry(super_class, vm_ci_mid(ci)));
- if (cc->me) {
+ vm_cc_cme_set(cc, rb_callable_method_entry(super_class, vm_ci_mid(ci)));
+ if (vm_cc_cme(cc)) {
RUBY_VM_CHECK_INTS(ec);
goto again;
}
@@ -191,7 +192,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
goto success;
}
case VM_METHOD_TYPE_ALIAS:
- CC_SET_ME(cc, aliased_callable_method_entry(cc->me));
+ vm_cc_cme_set(cc, aliased_callable_method_entry(vm_cc_cme(cc)));
goto again;
case VM_METHOD_TYPE_MISSING:
{
@@ -200,7 +201,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
argv, MISSING_NOENTRY, calling->kw_splat);
}
case VM_METHOD_TYPE_OPTIMIZED:
- switch (cc->me->def->body.optimize_type) {
+ switch (vm_cc_cme(cc)->def->body.optimize_type) {
case OPTIMIZED_METHOD_TYPE_SEND:
ret = send_internal(calling->argc, argv, calling->recv, calling->kw_splat ? CALL_FCALL_KW : CALL_FCALL);
goto success;
@@ -212,13 +213,13 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
goto success;
}
default:
- rb_bug("vm_call0: unsupported optimized method type (%d)", cc->me->def->body.optimize_type);
+ rb_bug("vm_call0: unsupported optimized method type (%d)", vm_cc_cme(cc)->def->body.optimize_type);
}
break;
case VM_METHOD_TYPE_UNDEF:
break;
}
- rb_bug("vm_call0: unsupported method type (%d)", cc->me->def->type);
+ rb_bug("vm_call0: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
return Qundef;
success:
@@ -359,7 +360,7 @@ struct rescue_funcall_args {
VALUE recv;
ID mid;
rb_execution_context_t *ec;
- const rb_method_entry_t *me;
+ const rb_callable_method_entry_t *cme;
unsigned int respond: 1;
unsigned int respond_to_missing: 1;
int argc;
@@ -373,7 +374,7 @@ check_funcall_exec(VALUE v)
struct rescue_funcall_args *args = (void *)v;
return call_method_entry(args->ec, args->defined_class,
args->recv, idMethodMissing,
- args->me, args->argc, args->argv, args->kw_splat);
+ args->cme, args->argc, args->argv, args->kw_splat);
}
static VALUE
@@ -417,7 +418,7 @@ static VALUE
check_funcall_missing(rb_execution_context_t *ec, VALUE klass, VALUE recv, ID mid, int argc, const VALUE *argv, int respond, VALUE def, int kw_splat)
{
struct rescue_funcall_args args;
- const rb_method_entry_t *me;
+ const rb_callable_method_entry_t *cme;
VALUE ret = Qundef;
ret = basic_obj_respond_to_missing(ec, klass, recv,
@@ -426,8 +427,9 @@ check_funcall_missing(rb_execution_context_t *ec, VALUE klass, VALUE recv, ID mi
args.respond = respond > 0;
args.respond_to_missing = (ret != Qundef);
ret = def;
- me = method_entry_get(klass, idMethodMissing, &args.defined_class);
- if (me && !METHOD_ENTRY_BASIC(me)) {
+ cme = callable_method_entry(klass, idMethodMissing, &args.defined_class);
+
+ if (cme && !METHOD_ENTRY_BASIC(cme)) {
VALUE argbuf, *new_args = ALLOCV_N(VALUE, argbuf, argc+1);
new_args[0] = ID2SYM(mid);
@@ -442,7 +444,7 @@ check_funcall_missing(rb_execution_context_t *ec, VALUE klass, VALUE recv, ID mi
ec->method_missing_reason = MISSING_NOENTRY;
args.ec = ec;
args.recv = recv;
- args.me = me;
+ args.cme = cme;
args.mid = mid;
args.argc = argc + 1;
args.argv = new_args;
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index e25c0dbb33..c6e5c671d6 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -115,9 +115,9 @@ callable_class_p(VALUE klass)
}
static int
-callable_method_entry_p(const rb_callable_method_entry_t *me)
+callable_method_entry_p(const rb_callable_method_entry_t *cme)
{
- if (me == NULL || callable_class_p(me->defined_class)) {
+ if (cme == NULL || callable_class_p(cme->defined_class)) {
return TRUE;
}
else {
@@ -221,8 +221,6 @@ static bool vm_stack_canary_was_born = false;
MJIT_FUNC_EXPORTED void
vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
{
- return;
-
const struct rb_control_frame_struct *reg_cfp = ec->cfp;
const struct rb_iseq_struct *iseq;
@@ -1024,9 +1022,9 @@ vm_search_const_defined_class(const VALUE cbase, ID id)
return 0;
}
-ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, IVC, struct rb_call_cache *, int));
+ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, IVC, const struct rb_callcache *, int));
static inline VALUE
-vm_getivar(VALUE obj, ID id, IVC ic, struct rb_call_cache *cc, int is_attr)
+vm_getivar(VALUE obj, ID id, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
VALUE val = Qundef;
@@ -1035,10 +1033,10 @@ vm_getivar(VALUE obj, ID id, IVC ic, struct rb_call_cache *cc, int is_attr)
// frozen?
}
else if (LIKELY(is_attr ?
- RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, cc->aux.index > 0) :
+ RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index(cc) > 0) :
RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial,
ic->ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
- st_index_t index = !is_attr ? ic->index : (cc->aux.index - 1);
+ st_index_t index = !is_attr ? ic->index : (vm_cc_attr_index(cc) - 1);
RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
@@ -1076,7 +1074,7 @@ vm_getivar(VALUE obj, ID id, IVC ic, struct rb_call_cache *cc, int is_attr)
ic->ic_serial = RCLASS_SERIAL(RBASIC(obj)->klass);
}
else { /* call_info */
- cc->aux.index = (int)index + 1;
+ vm_cc_attr_index_set(cc, (int)index + 1);
}
if (index < numiv) {
@@ -1124,7 +1122,7 @@ vm_getivar(VALUE obj, ID id, IVC ic, struct rb_call_cache *cc, int is_attr)
}
static inline VALUE
-vm_setivar(VALUE obj, ID id, VALUE val, IVC ic, struct rb_call_cache *cc, int is_attr)
+vm_setivar(VALUE obj, ID id, VALUE val, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
rb_check_frozen_internal(obj);
@@ -1135,9 +1133,9 @@ vm_setivar(VALUE obj, ID id, VALUE val, IVC ic, struct rb_call_cache *cc, int is
if (LIKELY(
(!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, ic->ic_serial == RCLASS_SERIAL(klass))) ||
- ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, cc->aux.index > 0)))) {
+ ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index(cc) > 0)))) {
VALUE *ptr = ROBJECT_IVPTR(obj);
- index = !is_attr ? ic->index : cc->aux.index-1;
+ index = !is_attr ? ic->index : vm_cc_attr_index(cc)-1;
if (RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_oorange, index < ROBJECT_NUMIV(obj))) {
RB_OBJ_WRITE(obj, &ptr[index], val);
@@ -1157,7 +1155,7 @@ vm_setivar(VALUE obj, ID id, VALUE val, IVC ic, struct rb_call_cache *cc, int is
rb_raise(rb_eArgError, "too many instance variables");
}
else {
- cc->aux.index = (int)(index + 1);
+ vm_cc_attr_index_set(cc, (int)(index + 1));
}
}
/* fall through */
@@ -1440,210 +1438,199 @@ vm_expandarray(VALUE *sp, VALUE ary, rb_num_t num, int flag)
static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *cd);
-#if __has_attribute(artificial)
-__attribute__((__artificial__))
-#endif
-static inline vm_call_handler
-calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me)
+static VALUE vm_mtbl_dump(VALUE klass, ID target_mid);
+
+static struct rb_class_cc_entries *
+vm_ccs_create(VALUE klass, const rb_callable_method_entry_t *cme)
{
- const struct rb_callinfo *ci = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
+ struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries);
+#if VM_CHECK_MODE > 0
+ ccs->debug_sig = ~(VALUE)ccs;
+#endif
+ ccs->capa = 4;
+ ccs->len = 0;
+ RB_OBJ_WRITE(klass, &ccs->cme, cme);
+ METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme);
+ ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, ccs->capa);
+ return ccs;
+}
- if (UNLIKELY(!me)) {
- RB_DEBUG_COUNTER_INC(mc_miss_by_nome);
- return vm_call_general; /* vm_call_method_nome() situation */
- }
- else if (LIKELY(cc->me != me)) {
- RB_DEBUG_COUNTER_INC(mc_miss_by_distinct);
- return vm_call_general; /* normal cases */
- }
- else if (UNLIKELY(cc->method_serial != me->def->method_serial)) {
- RB_DEBUG_COUNTER_INC(mc_miss_by_refine);
- return vm_call_general; /* cc->me was refined elsewhere */
- }
- /* "Calling a formerly-public method, which is now privatised, with an
- * explicit receiver" is the only situation we have to check here. A
- * formerly-private method now publicised is an absolutely safe thing.
- * Calling a private method without specifying a receiver is also safe. */
- else if ((METHOD_ENTRY_VISI(cc->me) != METHOD_VISI_PUBLIC) &&
- !(vm_ci_flag(ci) & VM_CALL_FCALL)) {
- RB_DEBUG_COUNTER_INC(mc_miss_by_visi);
- return vm_call_general;
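+// Append a (ci, cc) pair to a class's rb_class_cc_entries, doubling its
+// entry array when it is full.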
+static void
+vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc)
+{
+ if (UNLIKELY(ccs->len == ccs->capa)) {
+ const int nsize = ccs->capa * 2;
+ struct rb_class_cc_entries_entry *nents = ALLOC_N(struct rb_class_cc_entries_entry, nsize);
+ ccs->capa = nsize;
+ MEMCPY(nents, &ccs->entries[0], struct rb_class_cc_entries_entry, ccs->len);
+ ruby_xfree(ccs->entries);
+ ccs->entries = nents;
}
- else {
- RB_DEBUG_COUNTER_INC(mc_miss_spurious);
- (void)RB_DEBUG_COUNTER_INC_IF(mc_miss_reuse_call, cc->call != vm_call_general);
- return cc->call;
+ VM_ASSERT(ccs->len < ccs->capa);
+
+ const int pos = ccs->len++;
+ RB_OBJ_WRITE(klass, &ccs->entries[pos].ci, ci);
+ RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc);
+
+ if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) {
+ // for tuning
+ // vm_mtbl_dump(klass, 0);
}
}
-MJIT_FUNC_EXPORTED void
-rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass)
+#if VM_CHECK_MODE > 0
+void
+rb_vm_ccs_dump(struct rb_class_cc_entries *ccs)
{
- const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
-
- const rb_callable_method_entry_t *me =
- rb_callable_method_entry(klass, vm_ci_mid(ci));
- const vm_call_handler call = calccall(cd, me);
- struct rb_call_cache buf = {
- GET_GLOBAL_METHOD_STATE(),
- { RCLASS_SERIAL(klass) },
- me,
- me ? me->def->method_serial : 0,
- call,
- };
- if (call != vm_call_general) {
- for (int i = 0; i < numberof(cc->class_serial) - 1; i++) {
- buf.class_serial[i + 1] = cc->class_serial[i];
- }
+ fprintf(stderr, "ccs:%p (%d,%d)\n", ccs, ccs->len, ccs->capa);
+ for (int i=0; i<ccs->len; i++) {
+ vm_ci_dump(ccs->entries[i].ci);
+ rp(ccs->entries[i].cc);
}
- MEMCPY(cc, &buf, struct rb_call_cache, 1);
- VM_ASSERT(callable_method_entry_p(cc->me));
-}
-
-/* # Description of what `vm_cache_check_for_class_serial()` is doing #########
- *
- * - Let's assume a `struct rb_call_cache` has its `class_serial` as an array
- * of length 3 (typical situation for 64 bit environments):
- *
- * ```C
- * struct rb_call_cache {
- * rb_serial_t method_state;
- * rb_serial_t class_serial[3];
- * rb_callable_method_entry_t *me;
- * rb_method_definition_struct *def;
- * vm_call_handler call;
- * union { ... snip ... } aux;
- * };
- * ```
- *
- * - Initially, the `cc->class_serial` array is filled with zeros.
- *
- * - If the cache mishits, and if that was due to mc_miss_spurious situation,
- * `rb_vm_search_method_slowpath()` pushes the newest class serial at the
- * leftmost position of the `cc->class_serial`.
- *
- * ```
- * from: +--------------+-----+-----+-----+----+-----+------+-----+
- * | method_state | (x) | (y) | (z) | me | def | call | aux |
- * +--------------+-----+-----+-----+----+-----+------+-----+
- * \ \
- * \ \
- * \ \
- * \ \
- * \ \
- * v v
- * to: +--------------+-----+-----+-----+----+-----+------+-----+
- * | method_state | NEW | (x) | (y) | me | def | call | aux |
- * +--------------+-----+-----+-----+----+-----+------+-----+
- * ^^^
- * fill RCLASS_SERIAL(klass)
- * ```
- *
- * - Eventually, the `cc->class_serial` is filled with a series of classes that
- * share the same method entry for the same call site.
- *
- * - `vm_cache_check_for_class_serial()` can say that the cache now hits if
- * _any_ of the class serials stored inside of `cc->class_serial` is equal to
- * the given `class_serial` value.
- *
- * - It scans the array from left to right, looking for the expected class
- * serial. If it finds that at `cc->class_serial[0]` (this branch
- * probability is 98% according to @shyouhei's experiment), just returns
- * true. If it reaches the end of the array without finding anything,
- * returns false. This is done in the #1 loop below.
- *
- * - What needs to be complicated is when the class serial is found at either
- * `cc->class_serial[1]` or `cc->class_serial[2]`. When that happens, its
- * return value is true because `cc->me` and `cc->call` are valid. But
- * `cc->aux` might be invalid. Also the found class serial is expected to
- * hit next time. In this case we reorder the array and wipe out `cc->aux`.
- * This is done in the #2 loop below.
- *
- * ```
- * from: +--------------+-----+-----+-----+----+-----+------+-----+
- * | method_state | (x) | (y) | (z) | me | def | call | aux |
- * +--------------+-----+-----+-----+----+-----+------+-----+
- * \ \ |
- * \ \ |
- * +- \ --- \ -+
- * | \ \
- * | \ \
- * v v v
- * to: +--------------+-----+-----+-----+----+-----+------+-----+
- * | method_state | (z) | (x) | (y) | me | def | call | 000 |
- * +--------------+-----+-----+-----+----+-----+------+-----+
- * ^^^
- * wipe out
- * ```
- *
- */
-static inline bool
-vm_cache_check_for_class_serial(struct rb_call_cache *cc, rb_serial_t class_serial)
+}
+
+static int
+vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass)
{
- int i;
- rb_serial_t j;
+ VM_ASSERT(vm_ccs_p(ccs));
+ VM_ASSERT(ccs->len <= ccs->capa);
- /* This is the loop #1 in above description. */
- for (i = 0; i < numberof(cc->class_serial); i++) {
- j = cc->class_serial[i];
+ for (int i=0; i<ccs->len; i++) {
+ const struct rb_callinfo *ci = ccs->entries[i].ci;
+ const struct rb_callcache *cc = ccs->entries[i].cc;
- if (! j) {
- break;
- }
- else if (j != class_serial) {
- continue;
- }
- else if (! i) {
- return true;
+ VM_ASSERT(vm_ci_p(ci));
+ VM_ASSERT(vm_ci_mid(ci) == mid);
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(vm_cc_class_check(cc, klass));
+ VM_ASSERT(vm_cc_cme(cc) == ccs->cme);
+ }
+ return TRUE;
+}
+#endif
+
+#ifndef MJIT_HEADER
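+// Search the class's cc table for a call cache matching (klass, ci). On a
+// miss, allocate a new cc, register it under the method id's
+// rb_class_cc_entries and return it; returns vm_cc_empty() when no callable
+// method entry exists.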
+static const struct rb_callcache *
+vm_search_cc(VALUE klass, const struct rb_callinfo *ci)
+{
+ ID mid = vm_ci_mid(ci);
+ struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
+ struct rb_class_cc_entries *ccs = NULL;
+
+ if (cc_tbl) {
+ if (rb_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
+ const int ccs_len = ccs->len;
+ VM_ASSERT(vm_ccs_verify(ccs, mid, klass));
+
+ if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) {
+ rb_vm_ccs_free(ccs);
+ rb_id_table_delete(cc_tbl, mid);
+ ccs = NULL;
+ }
+ else {
+ for (int i=0; i<ccs_len; i++) {
+ const struct rb_callinfo *ccs_ci = ccs->entries[i].ci;
+ const struct rb_callcache *ccs_cc = ccs->entries[i].cc;
+
+ VM_ASSERT(vm_ci_p(ccs_ci));
+ VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache));
+
+ if (ccs_ci == ci) { // TODO: equality
+ RB_DEBUG_COUNTER_INC(cc_found_ccs);
+
+ VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid);
+ VM_ASSERT(ccs_cc->klass == klass);
+ VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc)));
+
+ return ccs_cc;
+ }
+ }
+ }
}
- else {
- goto hit;
+ }
+ else {
+ cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
+ }
+
+ const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
+
+ if (cme == NULL) {
+ // undef or not found: can't cache the information
+ VM_ASSERT(vm_cc_cme(vm_cc_empty()) == NULL);
+ return vm_cc_empty();
+ }
+ else {
+ const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general);
+ METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme);
+
+ if (ccs == NULL) {
+ VM_ASSERT(cc_tbl != NULL);
+ ccs = vm_ccs_create(klass, cme);
+ rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
}
+ vm_ccs_push(klass, ccs, ci, cc);
+
+ VM_ASSERT(vm_cc_cme(cc) != NULL);
+ VM_ASSERT(cme->called_id == mid);
+ VM_ASSERT(vm_cc_cme(cc)->called_id == mid);
+ return cc;
}
+}
- RB_DEBUG_COUNTER_INC(mc_class_serial_miss);
- return false;
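+// Slow path of method search: fetch (or build) a cc via vm_search_cc() and
+// store it into cd->cc, using a write barrier when cd is owned by a GC object.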
+MJIT_FUNC_EXPORTED void
+rb_vm_search_method_slowpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
+{
+ const struct rb_callcache *cc = vm_search_cc(klass, cd->ci);
- hit:
- /* This is the loop #2 in above description. */
- for (; i > 0; i--) {
- cc->class_serial[i] = cc->class_serial[i - 1];
+ if (cd_owner) {
+ RB_OBJ_WRITE(cd_owner, &cd->cc, cc);
+ }
+ else {
+ cd->cc = cc;
}
- cc->class_serial[0] = j;
- MEMZERO(&cc->aux, cc->aux, 1); /* cc->call is valid, but cc->aux might not. */
- return true;
+ VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass);
+ VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc)));
+ VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)));
+ VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci));
}
+#endif
static void
-vm_search_method_fastpath(struct rb_call_data *cd, VALUE klass)
+vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
{
- struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
#if OPT_INLINE_METHOD_CACHE
- if (LIKELY(RB_DEBUG_COUNTER_INC_UNLESS(mc_global_state_miss,
- GET_GLOBAL_METHOD_STATE() == cc->method_state) &&
- vm_cache_check_for_class_serial(cc, RCLASS_SERIAL(klass)))) {
- /* cache hit! */
- VM_ASSERT(cc->call != NULL);
- RB_DEBUG_COUNTER_INC(mc_inline_hit);
- return;
+ if (LIKELY(vm_cc_class_check(cc, klass))) {
+ if (LIKELY(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc)))) {
+ VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
+ RB_DEBUG_COUNTER_INC(mc_inline_hit);
+ VM_ASSERT(vm_cc_cme(cc) == NULL || // not found
+ (vm_ci_flag(cd->ci) & VM_CALL_SUPER) || // search_super w/ define_method
+ vm_cc_cme(cc)->called_id == vm_ci_mid(cd->ci)); // cme->called_id == ci->mid
+ return;
+ }
+ cd->cc = vm_cc_empty();
+ RB_DEBUG_COUNTER_INC(mc_inline_miss_invalidated);
+ }
+ else {
+ RB_DEBUG_COUNTER_INC(mc_inline_miss_klass);
}
- RB_DEBUG_COUNTER_INC(mc_inline_miss);
#endif
- rb_vm_search_method_slowpath(cd, klass);
+ rb_vm_search_method_slowpath(cd_owner, cd, klass);
+
+ VM_ASSERT(vm_cc_cme(cd->cc) == NULL || vm_cc_cme(cd->cc)->called_id == vm_ci_mid(cd->ci));
}
static void
-vm_search_method(struct rb_call_data *cd, VALUE recv)
+vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
{
VALUE klass = CLASS_OF(recv);
-
VM_ASSERT(klass != Qfalse);
VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
- vm_search_method_fastpath(cd, klass);
+
+ vm_search_method_fastpath(cd_owner, cd, klass);
}
static inline int
@@ -1659,16 +1646,16 @@ check_cfunc(const rb_callable_method_entry_t *me, VALUE (*func)())
}
static inline int
-vm_method_cfunc_is(CALL_DATA cd, VALUE recv, VALUE (*func)())
+vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, VALUE (*func)())
{
- vm_search_method(cd, recv);
- return check_cfunc(cd->cc.me, func);
+ vm_search_method((VALUE)iseq, cd, recv);
+ return check_cfunc(vm_cc_cme(cd->cc), func);
}
static VALUE
-opt_equal_fallback(VALUE recv, VALUE obj, CALL_DATA cd)
+opt_equal_fallback(const rb_iseq_t *iseq, VALUE recv, VALUE obj, CALL_DATA cd)
{
- if (vm_method_cfunc_is(cd, recv, rb_obj_equal)) {
+ if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_equal)) {
return recv == obj ? Qtrue : Qfalse;
}
@@ -1728,7 +1715,7 @@ static
inline
#endif
VALUE
-opt_eq_func(VALUE recv, VALUE obj, CALL_DATA cd)
+opt_eq_func(const rb_iseq_t *iseq, VALUE recv, VALUE obj, CALL_DATA cd)
{
switch (comparable_by_identity(recv, obj)) {
case 1:
@@ -1751,7 +1738,7 @@ opt_eq_func(VALUE recv, VALUE obj, CALL_DATA cd)
}
fallback:
- return opt_equal_fallback(recv, obj, cd);
+ return opt_equal_fallback(iseq, recv, obj, cd);
}
static
@@ -1781,7 +1768,7 @@ opt_eql_func(VALUE recv, VALUE obj, CALL_DATA cd)
}
fallback:
- return opt_equal_fallback(recv, obj, cd);
+ return opt_equal_fallback(NULL, recv, obj, cd);
}
#undef BUILTIN_CLASS_P
#undef EQ_UNREDEFINED_P
@@ -1797,14 +1784,14 @@ rb_equal_opt(VALUE obj1, VALUE obj2)
rb_gc_register_mark_object((VALUE)ci);
}
- struct rb_call_data cd = { .ci = ci, };
- return opt_eq_func(obj1, obj2, &cd);
+ struct rb_call_data cd = { .ci = ci, .cc = vm_cc_empty() };
+ return opt_eq_func(NULL, obj1, obj2, &cd);
}
VALUE
rb_eql_opt(VALUE obj1, VALUE obj2)
{
- struct rb_call_data cd = { .ci = vm_ci_new_id(idEqlP), };
+ struct rb_call_data cd = { .ci = vm_ci_new_id(idEqlP), .cc = vm_cc_empty() };
return opt_eql_func(obj1, obj2, &cd);
}
@@ -1929,11 +1916,11 @@ vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t
{
RB_DEBUG_COUNTER_INC(ccf_iseq_setup_0start);
- struct rb_call_cache *cc = &cd->cc;
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
+ const struct rb_callcache *cc = cd->cc;
+ const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
int param = iseq->body->param.size;
int local = iseq->body->local_table_size;
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
+ return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
MJIT_STATIC bool
@@ -2043,8 +2030,8 @@ vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame
struct rb_calling_info *calling,
struct rb_call_data *cd)
{
- const struct rb_call_cache *cc = &cd->cc;
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
+ const struct rb_callcache *cc = cd->cc;
+ const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
const int lead_num = iseq->body->param.lead_num;
const int opt = calling->argc - lead_num;
const int opt_num = iseq->body->param.opt_num;
@@ -2064,7 +2051,7 @@ vm_call_iseq_setup_normal_opt_start(rb_execution_context_t *ec, rb_control_frame
}
#endif
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, opt_pc, param - delta, local);
+ return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param - delta, local);
}
static VALUE
@@ -2072,8 +2059,8 @@ vm_call_iseq_setup_tailcall_opt_start(rb_execution_context_t *ec, rb_control_fra
struct rb_calling_info *calling,
struct rb_call_data *cd)
{
- const struct rb_call_cache *cc = &cd->cc;
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
+ const struct rb_callcache *cc = cd->cc;
+ const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
const int lead_num = iseq->body->param.lead_num;
const int opt = calling->argc - lead_num;
const int opt_pc = (int)iseq->body->param.opt_table[opt];
@@ -2103,12 +2090,12 @@ vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *
struct rb_call_data *cd)
{
const struct rb_callinfo *ci = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
+ const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
const int ci_kw_len = kw_arg->keyword_len;
@@ -2122,7 +2109,7 @@ vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *
int param = iseq->body->param.size;
int local = iseq->body->local_table_size;
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
+ return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
static VALUE
@@ -2131,12 +2118,12 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t
struct rb_call_data *cd)
{
const struct rb_callinfo *MAYBE_UNUSED(ci) = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
+ const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
VALUE * const argv = cfp->sp - calling->argc;
VALUE * const klocals = argv + kw_param->bits_start - kw_param->num;
@@ -2152,7 +2139,7 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t
int param = iseq->body->param.size;
int local = iseq->body->local_table_size;
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
+ return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
static inline int
@@ -2160,7 +2147,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
{
const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
if (LIKELY(rb_simple_iseq_p(iseq))) {
@@ -2172,7 +2159,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
}
- CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), vm_call_iseq_optimizable_p(cd->ci, &cd->cc));
+ CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), vm_call_iseq_optimizable_p(cd->ci, cd->cc));
return 0;
}
else if (rb_iseq_only_optparam_p(iseq)) {
@@ -2192,12 +2179,12 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
+ !(METHOD_ENTRY_VISI(vm_cc_cme(cc)) == METHOD_VISI_PROTECTED));
}
else {
CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start,
!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
+ !(METHOD_ENTRY_VISI(vm_cc_cme(cc)) == METHOD_VISI_PROTECTED));
}
/* initialize opt vars for self-references */
@@ -2225,7 +2212,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
args_setup_kw_parameters(ec, iseq, ci_kws, ci_kw_len, ci_keywords, klocals);
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_kwarg,
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
+ !(METHOD_ENTRY_VISI(vm_cc_cme(cc)) == METHOD_VISI_PROTECTED));
return 0;
}
@@ -2238,7 +2225,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
if (klocals[kw_param->num] == INT2FIX(0)) {
/* copy from default_values */
CC_SET_FASTPATH(cc, vm_call_iseq_setup_kwparm_nokwarg,
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
+ !(METHOD_ENTRY_VISI(vm_cc_cme(cc)) == METHOD_VISI_PROTECTED));
}
return 0;
@@ -2254,11 +2241,11 @@ vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct r
{
RB_DEBUG_COUNTER_INC(ccf_iseq_setup);
- const struct rb_call_cache *cc = &cd->cc;
- const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
+ const struct rb_callcache *cc = cd->cc;
+ const rb_iseq_t *iseq = def_iseq_ptr(vm_cc_cme(cc)->def);
const int param_size = iseq->body->param.size;
const int local_size = iseq->body->local_table_size;
- const int opt_pc = vm_callee_setup_arg(ec, calling, cd, def_iseq_ptr(cc->me->def), cfp->sp - calling->argc, param_size, local_size);
+ const int opt_pc = vm_callee_setup_arg(ec, calling, cd, def_iseq_ptr(vm_cc_cme(cc)->def), cfp->sp - calling->argc, param_size, local_size);
return vm_call_iseq_setup_2(ec, cfp, calling, cd, opt_pc, param_size, local_size);
}
@@ -2267,10 +2254,10 @@ vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct
int opt_pc, int param_size, int local_size)
{
const struct rb_callinfo *ci = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
- return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, opt_pc, param_size, local_size);
+ return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), opt_pc, param_size, local_size);
}
else {
return vm_call_iseq_setup_tailcall(ec, cfp, calling, cd, opt_pc);
@@ -2298,10 +2285,10 @@ static inline VALUE
vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd,
int opt_pc)
{
- const struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
unsigned int i;
VALUE *argv = cfp->sp - calling->argc;
- const rb_callable_method_entry_t *me = cc->me;
+ const rb_callable_method_entry_t *me = vm_cc_cme(cc);
const rb_iseq_t *iseq = def_iseq_ptr(me->def);
VALUE *src_argv = argv;
VALUE *sp_orig, *sp;
@@ -2501,9 +2488,9 @@ static VALUE
vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
const struct rb_callinfo *ci = cd->ci;
- const struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
VALUE val;
- const rb_callable_method_entry_t *me = cc->me;
+ const rb_callable_method_entry_t *me = vm_cc_cme(cc);
const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
int len = cfunc->argc;
@@ -2553,20 +2540,20 @@ vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb
static VALUE
vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
- struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
RB_DEBUG_COUNTER_INC(ccf_ivar);
cfp->sp -= 1;
- return vm_getivar(calling->recv, cc->me->def->body.attr.id, NULL, cc, TRUE);
+ return vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, cc, TRUE);
}
static VALUE
vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
- struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
RB_DEBUG_COUNTER_INC(ccf_attrset);
VALUE val = *(cfp->sp - 1);
cfp->sp -= 2;
- return vm_setivar(calling->recv, cc->me->def->body.attr.id, val, NULL, cc, 1);
+ return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, cc, 1);
}
static inline VALUE
@@ -2574,11 +2561,11 @@ vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling
{
rb_proc_t *proc;
VALUE val;
- const struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
/* control block frame */
- GetProcPtr(cc->me->def->body.bmethod.proc, proc);
- val = rb_vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->kw_splat, calling->block_handler, cc->me);
+ GetProcPtr(vm_cc_cme(cc)->def->body.bmethod.proc, proc);
+ val = rb_vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->kw_splat, calling->block_handler, vm_cc_cme(cc));
return val;
}
@@ -2601,6 +2588,65 @@ vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
return vm_call_bmethod_body(ec, calling, cd, argv);
}
+static VALUE
+find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
+{
+ VALUE klass = current_class;
+
+ /* for prepended Module, then start from cover class */
+ if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN)) klass = RBASIC_CLASS(klass);
+
+ while (RTEST(klass)) {
+ VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
+ if (owner == target_owner) {
+ return klass;
+ }
+ klass = RCLASS_SUPER(klass);
+ }
+
+ return current_class; /* maybe module function */
+}
+
+static const rb_callable_method_entry_t *
+aliased_callable_method_entry(const rb_callable_method_entry_t *me)
+{
+ const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
+ const rb_callable_method_entry_t *cme;
+
+ if (orig_me->defined_class == 0) {
+ VALUE defined_class = find_defined_class_by_owner(me->defined_class, orig_me->owner);
+ VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
+ cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
+
+ if (me->def->alias_count + me->def->complemented_count == 0) {
+ RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
+ }
+ else {
+ rb_method_definition_t *def =
+ rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
+ rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
+ }
+ }
+ else {
+ cme = (const rb_callable_method_entry_t *)orig_me;
+ }
+
+ VM_ASSERT(callable_method_entry_p(cme));
+ return cme;
+}
+
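+// Dispatch to the aliased method through a temporary, unmarkable cc that
+// points at the resolved callable method entry.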
+static VALUE
+vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
+{
+ const rb_callable_method_entry_t *cme = aliased_callable_method_entry(vm_cc_cme(cd->cc));
+ struct rb_callcache cc_body;
+ struct rb_call_data cd_body = {
+ .ci = cd->ci,
+ .cc = vm_cc_fill(&cc_body, Qundef, cme, NULL),
+ };
+ return vm_call_method_each_type(ec, cfp, calling, &cd_body);
+}
+
static enum method_missing_reason
ci_missing_reason(const struct rb_callinfo *ci)
{
@@ -2619,12 +2665,10 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
int i;
VALUE sym;
ID mid;
- const struct rb_callinfo *orig_ci = orig_cd->ci;
- const struct rb_call_cache *orig_cc = &orig_cd->cc;
- struct rb_call_cache *cc;
struct rb_call_data cd;
+ enum method_missing_reason missing_reason = 0;
- CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
+ CALLER_SETUP_ARG(reg_cfp, calling, orig_cd->ci);
i = calling->argc - 1;
@@ -2632,9 +2676,6 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
rb_raise(rb_eArgError, "no method name given");
}
- cd.cc = *orig_cc;
- cc = &cd.cc;
-
sym = TOPN(i);
if (!(mid = rb_check_id(&sym))) {
@@ -2642,12 +2683,12 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
VALUE exc =
rb_make_no_method_exception(rb_eNoMethodError, 0, calling->recv,
rb_long2int(calling->argc), &TOPN(i),
- vm_ci_flag(orig_ci) & (VM_CALL_FCALL|VM_CALL_VCALL));
+ vm_ci_flag(orig_cd->ci) & (VM_CALL_FCALL|VM_CALL_VCALL));
rb_exc_raise(exc);
}
TOPN(i) = rb_str_intern(sym);
mid = idMethodMissing;
- ec->method_missing_reason = cc->aux.method_missing_reason = ci_missing_reason(orig_ci);
+ missing_reason = ci_missing_reason(orig_cd->ci);
}
else {
/* shift arguments */
@@ -2658,10 +2699,14 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
DEC_SP(1);
}
- CC_SET_ME(cc, rb_callable_method_entry_with_refinements(CLASS_OF(calling->recv), mid, NULL));
unsigned int new_flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
- cd.ci = vm_ci_new_runtime(mid, new_flag, 0 /* not accessed (calling->argc is used) */, vm_ci_kwarg(orig_ci));
-
+ cd.ci = vm_ci_new_runtime(mid, new_flag, 0 /* not accessed (calling->argc is used) */, vm_ci_kwarg(orig_cd->ci));
+ struct rb_callcache cc_body;
+ cd.cc = vm_cc_fill(&cc_body,
+ Qundef,
+ rb_callable_method_entry_with_refinements(CLASS_OF(calling->recv), mid, NULL),
+ NULL);
+ if (missing_reason != 0) vm_cc_method_missing_reason_set(cd.cc, missing_reason);
return vm_call_method(ec, reg_cfp, calling, (CALL_DATA)&cd);
}
@@ -2706,20 +2751,19 @@ vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
}
else {
calling->recv = rb_vm_bh_to_procval(ec, block_handler);
- vm_search_method(cd, calling->recv);
+ vm_search_method((VALUE)reg_cfp->iseq, cd, calling->recv);
return vm_call_general(ec, reg_cfp, calling, cd);
}
}
static VALUE
-vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *orig_cd)
+vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling,
+ const struct rb_callinfo *orig_ci, enum method_missing_reason reason)
{
RB_DEBUG_COUNTER_INC(ccf_method_missing);
- const struct rb_callinfo *orig_ci = orig_cd->ci;
- const struct rb_call_cache *orig_cc = &orig_cd->cc;
VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
- struct rb_call_data cd = *orig_cd;
+ struct rb_call_data cd;
unsigned int argc;
CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
@@ -2727,8 +2771,11 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
cd.ci = vm_ci_new_runtime(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
- cd.cc.me = rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv),
- idMethodMissing, NULL);
+ struct rb_callcache cc_body;
+ cd.cc = vm_cc_fill(&cc_body,
+ Qundef,
+ rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL),
+ vm_call_general);
calling->argc = argc;
@@ -2741,29 +2788,39 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
argv[0] = ID2SYM(vm_ci_mid(orig_ci));
INC_SP(1);
- ec->method_missing_reason = orig_cc->aux.method_missing_reason;
+ ec->method_missing_reason = reason;
return vm_call_method(ec, reg_cfp, calling, &cd);
}
+static VALUE
+vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
+ struct rb_calling_info *calling, struct rb_call_data *cd)
+{
+ return vm_call_method_missing_body(ec, reg_cfp, calling, cd->ci, vm_cc_cmethod_missing_reason(cd->cc));
+}
+
static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
static VALUE
vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd, VALUE klass)
{
- RB_DEBUG_COUNTER_INC(ccf_method_missing);
-
- const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
klass = RCLASS_SUPER(klass);
- CC_SET_ME(cc, klass ? rb_callable_method_entry(klass, vm_ci_mid(ci)) : NULL);
- if (!cc->me) {
+ const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, vm_ci_mid(cd->ci)) : NULL;
+ if (cme == NULL) {
return vm_call_method_nome(ec, cfp, calling, cd);
}
- if (cc->me->def->type == VM_METHOD_TYPE_REFINED &&
- cc->me->def->body.refined.orig_me) {
- CC_SET_ME(cc, refined_method_callable_without_refinement(cc->me));
+ if (cme->def->type == VM_METHOD_TYPE_REFINED &&
+ cme->def->body.refined.orig_me) {
+ cme = refined_method_callable_without_refinement(cme);
}
- return vm_call_method_each_type(ec, cfp, calling, cd);
+
+ struct rb_callcache cc_body;
+ struct rb_call_data cd_body = {
+ .ci = cd->ci,
+ .cc = vm_cc_fill(&cc_body, Qundef, cme, NULL),
+ };
+ return vm_call_method_each_type(ec, cfp, calling, &cd_body);
+
}
static inline VALUE
@@ -2795,53 +2852,6 @@ current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
return cfp;
}
-static VALUE
-find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
-{
- VALUE klass = current_class;
-
- /* for prepended Module, then start from cover class */
- if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN)) klass = RBASIC_CLASS(klass);
-
- while (RTEST(klass)) {
- VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
- if (owner == target_owner) {
- return klass;
- }
- klass = RCLASS_SUPER(klass);
- }
-
- return current_class; /* maybe module function */
-}
-
-static const rb_callable_method_entry_t *
-aliased_callable_method_entry(const rb_callable_method_entry_t *me)
-{
- const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
- const rb_callable_method_entry_t *cme;
-
- if (orig_me->defined_class == 0) {
- VALUE defined_class = find_defined_class_by_owner(me->defined_class, orig_me->owner);
- VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
- cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
-
- if (me->def->alias_count + me->def->complemented_count == 0) {
- RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
- }
- else {
- rb_method_definition_t *def =
- rb_method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id);
- rb_method_definition_set((rb_method_entry_t *)me, def, (void *)cme);
- }
- }
- else {
- cme = (const rb_callable_method_entry_t *)orig_me;
- }
-
- VM_ASSERT(callable_method_entry_p(cme));
- return cme;
-}
-
static const rb_callable_method_entry_t *
refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
{
@@ -2865,57 +2875,78 @@ refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
return cme;
}
-static int
-search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, ID mid, struct rb_call_cache *cc)
+static const rb_callable_method_entry_t *
+search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_call_data *cd)
{
+ ID mid = vm_ci_mid(cd->ci);
const rb_cref_t *cref = vm_get_cref(cfp->ep);
+ const struct rb_callcache * const cc = cd->cc;
+ const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
for (; cref; cref = CREF_NEXT(cref)) {
- const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), cc->me->owner);
+ const VALUE refinement = find_refinement(CREF_REFINEMENTS(cref), vm_cc_cme(cc)->owner);
if (NIL_P(refinement)) continue;
const rb_callable_method_entry_t *const ref_me =
rb_callable_method_entry(refinement, mid);
if (ref_me) {
- if (cc->call == vm_call_super_method) {
+ if (vm_cc_call(cc) == vm_call_super_method) {
const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
continue;
}
}
- if (cc->me->def->type != VM_METHOD_TYPE_REFINED ||
- cc->me->def != ref_me->def) {
- CC_SET_ME(cc, ref_me);
+
+ if (cme->def->type != VM_METHOD_TYPE_REFINED ||
+ cme->def != ref_me->def) {
+ cme = ref_me;
}
if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
- return TRUE;
+ return cme;
}
}
else {
- CC_SET_ME(cc, NULL);
- return FALSE;
+ return NULL;
}
}
- if (cc->me->def->body.refined.orig_me) {
- CC_SET_ME(cc, refined_method_callable_without_refinement(cc->me));
+ if (vm_cc_cme(cc)->def->body.refined.orig_me) {
+ return refined_method_callable_without_refinement(vm_cc_cme(cc));
}
else {
- VALUE klass = RCLASS_SUPER(cc->me->defined_class);
- CC_SET_ME(cc, klass ? rb_callable_method_entry(klass, mid) : NULL);
+ VALUE klass = RCLASS_SUPER(vm_cc_cme(cc)->defined_class);
+ const rb_callable_method_entry_t *cme = klass ? rb_callable_method_entry(klass, mid) : NULL;
+ return cme;
+ }
+}
+
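+// Resolve the method entry through active refinements and re-dispatch via a
+// temporary stack-allocated cc; falls back to the no-method path when no
+// entry is found.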
+static VALUE
+vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
+{
+ const rb_callable_method_entry_t *cme = search_refined_method(ec, cfp, cd);
+
+ if (cme != NULL) {
+ struct rb_callcache cc_body;
+ struct rb_call_data cd_body = {
+ .ci = cd->ci,
+ .cc = vm_cc_fill(&cc_body, Qundef, cme, NULL),
+ };
+ return vm_call_method(ec, cfp, calling, &cd_body);
+ }
+ else {
+ return vm_call_method_nome(ec, cfp, calling, cd);
}
- return TRUE;
}
static VALUE
vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
- switch (cc->me->def->type) {
+ switch (vm_cc_cme(cc)->def->type) {
case VM_METHOD_TYPE_ISEQ:
CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
return vm_call_iseq_setup(ec, cfp, calling, cd);
@@ -2930,20 +2961,20 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
rb_check_arity(calling->argc, 1, 1);
- cc->aux.index = 0;
- CC_SET_FASTPATH(cc, vm_call_attrset, !((vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT) || (vm_ci_flag(ci) & VM_CALL_KWARG)));
+ vm_cc_attr_index_set(cc, 0);
+ CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG)));
return vm_call_attrset(ec, cfp, calling, cd);
case VM_METHOD_TYPE_IVAR:
CALLER_SETUP_ARG(cfp, calling, ci);
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
rb_check_arity(calling->argc, 0, 0);
- cc->aux.index = 0;
- CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT));
+ vm_cc_attr_index_set(cc, 0);
+ CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT)));
return vm_call_ivar(ec, cfp, calling, cd);
case VM_METHOD_TYPE_MISSING:
- cc->aux.method_missing_reason = 0;
+ vm_cc_method_missing_reason_set(cc, 0);
CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
return vm_call_method_missing(ec, cfp, calling, cd);
@@ -2952,12 +2983,11 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
return vm_call_bmethod(ec, cfp, calling, cd);
case VM_METHOD_TYPE_ALIAS:
- CC_SET_ME(cc, aliased_callable_method_entry(cc->me));
- VM_ASSERT(cc->me != NULL);
- return vm_call_method_each_type(ec, cfp, calling, cd);
+ CC_SET_FASTPATH(cc, vm_call_alias, TRUE);
+ return vm_call_alias(ec, cfp, calling, cd);
case VM_METHOD_TYPE_OPTIMIZED:
- switch (cc->me->def->body.optimize_type) {
+ switch (vm_cc_cme(cc)->def->body.optimize_type) {
case OPTIMIZED_METHOD_TYPE_SEND:
CC_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
return vm_call_opt_send(ec, cfp, calling, cd);
@@ -2969,23 +2999,22 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
return vm_call_opt_block_call(ec, cfp, calling, cd);
default:
rb_bug("vm_call_method: unsupported optimized method type (%d)",
- cc->me->def->body.optimize_type);
+ vm_cc_cme(cc)->def->body.optimize_type);
}
case VM_METHOD_TYPE_UNDEF:
break;
case VM_METHOD_TYPE_ZSUPER:
- return vm_call_zsuper(ec, cfp, calling, cd, RCLASS_ORIGIN(cc->me->defined_class));
+ return vm_call_zsuper(ec, cfp, calling, cd, RCLASS_ORIGIN(vm_cc_cme(cc)->defined_class));
case VM_METHOD_TYPE_REFINED:
- if (search_refined_method(ec, cfp, vm_ci_mid(ci), cc))
- return vm_call_method(ec, cfp, calling, cd);
- else
- return vm_call_method_nome(ec, cfp, calling, cd);
+ // CC_SET_FASTPATH(cc, vm_call_refined, TRUE);
+        // should not set FASTPATH because vm_call_refined checks cc->call.
+ return vm_call_refined(ec, cfp, calling, cd);
}
- rb_bug("vm_call_method: unsupported method type (%d)", cc->me->def->type);
+ rb_bug("vm_call_method: unsupported method type (%d)", vm_cc_cme(cc)->def->type);
}
NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
@@ -2995,7 +3024,6 @@ vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct
{
/* method missing */
const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
const int stat = ci_missing_reason(ci);
if (vm_ci_mid(ci) == idMethodMissing) {
@@ -3004,9 +3032,7 @@ vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct
vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
}
else {
- cc->aux.method_missing_reason = stat;
- CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
- return vm_call_method_missing(ec, cfp, calling, cd);
+ return vm_call_method_missing_body(ec, cfp, calling, cd->ci, stat);
}
}
@@ -3014,12 +3040,12 @@ static inline VALUE
vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
const struct rb_callinfo *ci = cd->ci;
- struct rb_call_cache *cc = &cd->cc;
+ const struct rb_callcache *cc = cd->cc;
- VM_ASSERT(callable_method_entry_p(cc->me));
+ VM_ASSERT(callable_method_entry_p(vm_cc_cme(cc)));
- if (cc->me != NULL) {
- switch (METHOD_ENTRY_VISI(cc->me)) {
+ if (vm_cc_cme(cc) != NULL) {
+ switch (METHOD_ENTRY_VISI(vm_cc_cme(cc))) {
case METHOD_VISI_PUBLIC: /* likely */
return vm_call_method_each_type(ec, cfp, calling, cd);
@@ -3028,7 +3054,7 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
enum method_missing_reason stat = MISSING_PRIVATE;
if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
- cc->aux.method_missing_reason = stat;
+ vm_cc_method_missing_reason_set(cc, stat);
CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
return vm_call_method_missing(ec, cfp, calling, cd);
}
@@ -3036,15 +3062,19 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
case METHOD_VISI_PROTECTED:
if (!(vm_ci_flag(ci) & VM_CALL_OPT_SEND)) {
- if (!rb_obj_is_kind_of(cfp->self, cc->me->defined_class)) {
- cc->aux.method_missing_reason = MISSING_PROTECTED;
+ if (!rb_obj_is_kind_of(cfp->self, vm_cc_cme(cc)->defined_class)) {
+ vm_cc_method_missing_reason_set(cc, MISSING_PROTECTED);
return vm_call_method_missing(ec, cfp, calling, cd);
}
else {
/* caching method info to dummy cc */
- VM_ASSERT(cc->me != NULL);
- struct rb_call_data cd_entry = *cd;
- return vm_call_method_each_type(ec, cfp, calling, &cd_entry);
+ VM_ASSERT(vm_cc_cme(cc) != NULL);
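+ // vm_call_method_each_type may install a fastpath; using a disposable on-stack cc keeps the shared, GC-managed cc untouched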
+ struct rb_callcache cc_body;
+ struct rb_call_data cd_body = {
+ .ci = ci,
+ .cc = vm_cc_fill(&cc_body, cc->klass, vm_cc_cme(cc), vm_cc_call(cc)),
+ };
+ return vm_call_method_each_type(ec, cfp, calling, &cd_body);
}
}
return vm_call_method_each_type(ec, cfp, calling, cd);
@@ -3071,8 +3101,8 @@ vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, st
RB_DEBUG_COUNTER_INC(ccf_super_method);
/* this check is required to distinguish with other functions. */
- const struct rb_call_cache *cc = &cd->cc;
- if (cc->call != vm_call_super_method) rb_bug("bug");
+ const struct rb_callcache *cc = cd->cc;
+ if (vm_cc_call(cc) != vm_call_super_method) rb_bug("bug");
return vm_call_method(ec, reg_cfp, calling, cd);
}
@@ -3145,30 +3175,34 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c
if (!klass) {
/* bound instance method of module */
- cd->cc.aux.method_missing_reason = MISSING_SUPER;
- CC_SET_FASTPATH(&cd->cc, vm_call_method_missing, TRUE);
+ const struct rb_callcache *cc = vm_cc_new(klass, NULL, vm_call_method_missing);
+ RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
}
else {
- struct rb_call_cache *cc = &cd->cc;
-#if OPT_INLINE_METHOD_CACHE
- /* Unlike normal method search, we only consider the first class
- * serial. Since we're testing defined_class rather than receiver,
- * there's only one valid "warm" value. */
- if (LIKELY(RB_DEBUG_COUNTER_INC_UNLESS(mc_global_state_miss,
- GET_GLOBAL_METHOD_STATE() == cc->method_state) &&
- cc->class_serial[0] == RCLASS_SERIAL(klass)) &&
- cc->me && vm_ci_mid(cd->ci) == cc->me->called_id) {
- VM_ASSERT(cc->call != NULL);
- RB_DEBUG_COUNTER_INC(mc_inline_hit);
- return;
- }
-#endif
-
- CC_SET_ME(cc, rb_callable_method_entry(klass, vm_ci_mid(cd->ci)));
- CC_SET_FASTPATH(cc, vm_call_super_method, TRUE);
-
- cc->method_state = GET_GLOBAL_METHOD_STATE();
- cc->class_serial[0] = RCLASS_SERIAL(klass);
+ vm_search_method_fastpath((VALUE)reg_cfp->iseq, cd, klass);
+ const rb_callable_method_entry_t *cached_cme = vm_cc_cme(cd->cc);
+
+ ID mid = vm_ci_mid(cd->ci);
+
+ // with define_method, the cached cc can be for a different method id, so re-check it here
+ if (cached_cme == NULL) {
+ // temporary CC. revisit it
+ static const struct rb_callcache *empty_cc_for_super = NULL;
+ if (empty_cc_for_super == NULL) {
+ empty_cc_for_super = vm_cc_new(0, NULL, vm_call_super_method);
+ FL_SET_RAW(empty_cc_for_super, VM_CALLCACHE_UNMARKABLE);
+ rb_gc_register_mark_object((VALUE)empty_cc_for_super);
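+ // registered as a GC mark object so this process-wide fallback cc is never collected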
+ }
+ RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, empty_cc_for_super);
+ }
+ else if (cached_cme->called_id != mid) {
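+ // the cached cme was registered under a different id; allocate a fresh cc for this super call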
+ const rb_callable_method_entry_t *cme = rb_callable_method_entry(klass, mid);
+ const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_super_method);
+ RB_OBJ_WRITE(reg_cfp->iseq, &cd->cc, cc);
+ }
+ else {
+ vm_cc_call_set(cd->cc, vm_call_super_method);
+ }
}
}
@@ -3958,7 +3992,7 @@ vm_search_method_wrap(
struct rb_call_data *cd,
VALUE recv)
{
- vm_search_method(cd, recv);
+ vm_search_method((VALUE)reg_cfp->iseq, cd, recv);
}
static void
@@ -3999,9 +4033,8 @@ vm_sendish(
struct rb_call_data *cd,
VALUE recv))
{
- const struct rb_callinfo *ci = cd->ci;
- CALL_CACHE cc = &cd->cc;
VALUE val;
+ const struct rb_callinfo *ci = cd->ci;
int argc = vm_ci_argc(ci);
VALUE recv = TOPN(argc);
struct rb_calling_info calling;
@@ -4012,8 +4045,9 @@ vm_sendish(
calling.argc = argc;
method_explorer(GET_CFP(), cd, recv);
+ const struct rb_callcache *cc = cd->cc;
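+ // method_explorer may have replaced cd->cc, so load the cc only after the search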
- val = cc->call(ec, GET_CFP(), &calling, cd);
+ val = vm_cc_call(cc)(ec, GET_CFP(), &calling, cd);
if (val != Qundef) {
return val; /* CFUNC normal return */
@@ -4356,10 +4390,10 @@ vm_opt_mod(VALUE recv, VALUE obj)
}
static VALUE
-vm_opt_neq(CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
+vm_opt_neq(const rb_iseq_t *iseq, CALL_DATA cd, CALL_DATA cd_eq, VALUE recv, VALUE obj)
{
- if (vm_method_cfunc_is(cd, recv, rb_obj_not_equal)) {
- VALUE val = opt_eq_func(recv, obj, cd_eq);
+ if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not_equal)) {
+ VALUE val = opt_eq_func(iseq, recv, obj, cd_eq);
if (val != Qundef) {
return RTEST(val) ? Qfalse : Qtrue;
@@ -4630,13 +4664,13 @@ vm_opt_empty_p(VALUE recv)
VALUE rb_false(VALUE obj);
static VALUE
-vm_opt_nil_p(CALL_DATA cd, VALUE recv)
+vm_opt_nil_p(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
{
if (recv == Qnil &&
BASIC_OP_UNREDEFINED_P(BOP_NIL_P, NIL_REDEFINED_OP_FLAG)) {
return Qtrue;
}
- else if (vm_method_cfunc_is(cd, recv, rb_false)) {
+ else if (vm_method_cfunc_is(iseq, cd, recv, rb_false)) {
return Qfalse;
}
else {
@@ -4692,9 +4726,9 @@ vm_opt_succ(VALUE recv)
}
static VALUE
-vm_opt_not(CALL_DATA cd, VALUE recv)
+vm_opt_not(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv)
{
- if (vm_method_cfunc_is(cd, recv, rb_obj_not)) {
+ if (vm_method_cfunc_is(iseq, cd, recv, rb_obj_not)) {
return RTEST(recv) ? Qfalse : Qtrue;
}
else {
diff --git a/vm_insnhelper.h b/vm_insnhelper.h
index e9337b82a8..07b38ea9d9 100644
--- a/vm_insnhelper.h
+++ b/vm_insnhelper.h
@@ -121,20 +121,13 @@ enum vm_regan_acttype {
*/
static inline void
-CC_SET_FASTPATH(CALL_CACHE cc, vm_call_handler func, bool enabled)
+CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enabled)
{
if (LIKELY(enabled)) {
- cc->call = func;
+ vm_cc_call_set(cc, func);
}
}
-static inline void
-CC_SET_ME(CALL_CACHE cc, const rb_callable_method_entry_t *me)
-{
- cc->me = me;
- cc->method_serial = me ? me->def->method_serial : 0;
-}
-
#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])
/**********************************************************/
@@ -258,10 +251,10 @@ THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
/* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
can be used as a fastpath. */
static bool
-vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_call_cache *cc)
+vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
- !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED);
+ !(METHOD_ENTRY_VISI(vm_cc_cme(cc)) == METHOD_VISI_PROTECTED);
}
#endif /* RUBY_INSNHELPER_H */
diff --git a/vm_method.c b/vm_method.c
index 4a82cc3e82..123298c6af 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -6,25 +6,6 @@
#define METHOD_DEBUG 0
-#if OPT_GLOBAL_METHOD_CACHE
-#ifndef GLOBAL_METHOD_CACHE_SIZE
-#define GLOBAL_METHOD_CACHE_SIZE 0x800
-#endif
-#define LSB_ONLY(x) ((x) & ~((x) - 1))
-#define POWER_OF_2_P(x) ((x) == LSB_ONLY(x))
-#if !POWER_OF_2_P(GLOBAL_METHOD_CACHE_SIZE)
-# error GLOBAL_METHOD_CACHE_SIZE must be power of 2
-#endif
-#ifndef GLOBAL_METHOD_CACHE_MASK
-#define GLOBAL_METHOD_CACHE_MASK (GLOBAL_METHOD_CACHE_SIZE-1)
-#endif
-
-#define GLOBAL_METHOD_CACHE_KEY(c,m) ((((c)>>3)^(m))&(global_method_cache.mask))
-#define GLOBAL_METHOD_CACHE(c,m) (global_method_cache.entries + GLOBAL_METHOD_CACHE_KEY(c,m))
-#else
-#define GLOBAL_METHOD_CACHE(c,m) (rb_bug("global method cache disabled improperly"), NULL)
-#endif
-
static int vm_redefinition_check_flag(VALUE klass);
static void rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass);
@@ -37,50 +18,108 @@ static void rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VAL
#define singleton_undefined idSingleton_method_undefined
#define attached id__attached__
-struct cache_entry {
- rb_serial_t method_state;
- rb_serial_t class_serial;
- ID mid;
- rb_method_entry_t* me;
- VALUE defined_class;
-};
-
-#if OPT_GLOBAL_METHOD_CACHE
-static struct {
- unsigned int size;
- unsigned int mask;
- struct cache_entry *entries;
-} global_method_cache = {
- GLOBAL_METHOD_CACHE_SIZE,
- GLOBAL_METHOD_CACHE_MASK,
-};
-#endif
-
#define ruby_running (GET_VM()->running)
/* int ruby_running = 0; */
-static void
-rb_class_clear_method_cache(VALUE klass, VALUE arg)
+static enum rb_id_table_iterator_result
+vm_ccs_dump_i(ID mid, VALUE val, void *data)
{
- rb_serial_t old_serial = *(rb_serial_t *)arg;
- if (RCLASS_SERIAL(klass) > old_serial) {
- return;
+ const struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)val;
+ fprintf(stderr, " | %s (%d) ", rb_id2name(mid), ccs->len);
+ rp(ccs->cme);
+
+ for (int i=0; i<ccs->len; i++) {
+ fprintf(stderr, " | [%d] ", i); vm_ci_dump(ccs->entries[i].ci);
+ rp_m( " | ", ccs->entries[i].cc);
}
- mjit_remove_class_serial(RCLASS_SERIAL(klass));
- RCLASS_SERIAL(klass) = rb_next_class_serial();
+ return ID_TABLE_CONTINUE;
+}
- if (BUILTIN_TYPE(klass) == T_ICLASS) {
- struct rb_id_table *table = RCLASS_CALLABLE_M_TBL(klass);
- if (table) {
- rb_id_table_clear(table);
- }
+static void
+vm_ccs_dump(VALUE klass, ID target_mid)
+{
+ struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
+ if (cc_tbl) {
+ const struct rb_class_cc_entries *ccs;
+ if (target_mid) {
+ if (rb_id_table_lookup(cc_tbl, target_mid, (VALUE *)&ccs)) {
+ fprintf(stderr, " [CCTB] %p\n", cc_tbl);
+ vm_ccs_dump_i(target_mid, (VALUE)ccs, NULL);
+ }
+ }
+ else {
+ fprintf(stderr, " [CCTB] %p\n", cc_tbl);
+ rb_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid);
+ }
}
- else {
- VM_ASSERT(RCLASS_CALLABLE_M_TBL(klass) == 0);
+}
+
+static enum rb_id_table_iterator_result
+vm_cme_dump_i(ID mid, VALUE val, void *data)
+{
+ ID target_mid = (ID)data;
+ if (target_mid == 0 || mid == target_mid) {
+ rp_m(" > ", val);
}
+ return ID_TABLE_CONTINUE;
+}
- rb_class_foreach_subclass(klass, rb_class_clear_method_cache, arg);
+static VALUE
+vm_mtbl_dump(VALUE klass, ID target_mid)
+{
+ fprintf(stderr, "# vm_mtbl\n");
+ while (klass) {
+ rp_m(" -> ", klass);
+ rb_method_entry_t *me;
+
+ if (RCLASS_M_TBL(klass)) {
+ if (target_mid != 0) {
+ if (rb_id_table_lookup(RCLASS_M_TBL(klass), target_mid, (VALUE *)&me)) {
+ rp_m(" [MTBL] ", me);
+ }
+ }
+ else {
+ fprintf(stderr, " ## RCLASS_M_TBL (%p)\n", RCLASS_M_TBL(klass));
+ rb_id_table_foreach(RCLASS_M_TBL(klass), vm_cme_dump_i, NULL);
+ }
+ }
+ else {
+ fprintf(stderr, " MTBL: NULL\n");
+ }
+ if (RCLASS_CALLABLE_M_TBL(klass)) {
+ if (target_mid != 0) {
+ if (rb_id_table_lookup(RCLASS_CALLABLE_M_TBL(klass), target_mid, (VALUE *)&me)) {
+ rp_m(" [CM**] ", me);
+ }
+ }
+ else {
+ fprintf(stderr, " ## RCLASS_CALLABLE_M_TBL\n");
+ rb_id_table_foreach(RCLASS_CALLABLE_M_TBL(klass), vm_cme_dump_i, NULL);
+ }
+ }
+ if (RCLASS_CC_TBL(klass)) {
+ vm_ccs_dump(klass, target_mid);
+ }
+ klass = RCLASS_SUPER(klass);
+ }
+ return Qnil;
+}
+
+void
+rb_vm_mtbl_dump(const char *msg, VALUE klass, ID target_mid)
+{
+ fprintf(stderr, "[%s] ", msg);
+ vm_mtbl_dump(klass, target_mid);
+}
+
+static inline void
+vm_me_invalidate_cache(rb_callable_method_entry_t *cme)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cme, imemo_ment));
+ VM_ASSERT(callable_method_entry_p(cme));
+ METHOD_ENTRY_INVALIDATED_SET(cme);
+ RB_DEBUG_COUNTER_INC(cc_cme_invalidate);
}
void
@@ -89,31 +128,131 @@ rb_clear_constant_cache(void)
INC_GLOBAL_CONSTANT_STATE();
}
-void
-rb_clear_method_cache_by_class(VALUE klass)
+static rb_method_entry_t *rb_method_entry_alloc(ID called_id, VALUE owner, VALUE defined_class, const rb_method_definition_t *def);
+const rb_method_entry_t * rb_method_entry_clone(const rb_method_entry_t *src_me);
+static const rb_callable_method_entry_t *complemented_callable_method_entry(VALUE klass, ID id);
+
+static void
+clear_method_cache_by_id_in_class(VALUE klass, ID mid)
{
- if (klass && klass != Qundef) {
- int global = klass == rb_cBasicObject || klass == rb_cObject || klass == rb_mKernel;
+ VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
- RUBY_DTRACE_HOOK(METHOD_CACHE_CLEAR, (global ? "global" : rb_class2name(klass)));
+ if (LIKELY(RCLASS_EXT(klass)->subclasses == NULL)) {
+ // no subclasses
+ // check only current class
- if (global) {
- INC_GLOBAL_METHOD_STATE();
- }
- else {
- rb_serial_t old_serial = PREV_CLASS_SERIAL();
- rb_class_clear_method_cache(klass, (VALUE)&old_serial);
- }
+ struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
+ struct rb_class_cc_entries *ccs;
+
+ // invalidate CCs
+ if (cc_tbl && rb_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
+ rb_vm_ccs_free(ccs);
+ rb_id_table_delete(cc_tbl, mid);
+ RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_ccs);
+ }
+
+ // remove from callable_m_tbl, if exists
+ struct rb_id_table *cm_tbl;
+ if ((cm_tbl = RCLASS_CALLABLE_M_TBL(klass)) != NULL) {
+ rb_id_table_delete(cm_tbl, mid);
+ RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_callable);
+ }
+ RB_DEBUG_COUNTER_INC(cc_invalidate_leaf);
}
+ else {
+ const rb_callable_method_entry_t *cme = complemented_callable_method_entry(klass, mid);
+
+ if (cme) {
+ // invalidate cme if found to invalidate the inline method cache.
+
+ if (METHOD_ENTRY_CACHED(cme)) {
+ // invalidate cc by invalidating cc->cme
+ VALUE owner = cme->owner;
+ rb_callable_method_entry_t *new_cme =
+ (rb_callable_method_entry_t *)rb_method_entry_clone((const rb_method_entry_t *)cme);
+ struct rb_id_table *mtbl = RCLASS_M_TBL(RCLASS_ORIGIN(owner));
+ rb_id_table_insert(mtbl, mid, (VALUE)new_cme);
+ RB_OBJ_WRITTEN(owner, cme, new_cme);
+ vm_me_invalidate_cache((rb_callable_method_entry_t *)cme);
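+ // existing call caches still reference the old cme, which is now flagged invalid; the clone replaces it in m_tbl for future lookups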
+
+ RB_DEBUG_COUNTER_INC(cc_invalidate_tree_cme);
+ }
- if (klass == rb_mKernel) {
- rb_subclass_entry_t *entry = RCLASS_EXT(klass)->subclasses;
+ // invalidate complement tbl
+ if (METHOD_ENTRY_COMPLEMENTED(cme)) {
+ VALUE defined_class = cme->defined_class;
+ struct rb_id_table *cm_tbl = RCLASS_CALLABLE_M_TBL(defined_class);
+ VM_ASSERT(cm_tbl != NULL);
+ int r = rb_id_table_delete(cm_tbl, mid);
+ VM_ASSERT(r == TRUE); (void)r;
+ RB_DEBUG_COUNTER_INC(cc_invalidate_tree_callable);
+ }
- for (; entry != NULL; entry = entry->next) {
- struct rb_id_table *table = RCLASS_CALLABLE_M_TBL(entry->klass);
- if (table)rb_id_table_clear(table);
- }
+ RB_DEBUG_COUNTER_INC(cc_invalidate_tree);
+ }
+ }
+}
+
+static void
+clear_iclass_method_cache_by_id(VALUE iclass, VALUE d)
+{
+ VM_ASSERT(RB_TYPE_P(iclass, T_ICLASS));
+ ID mid = (ID)d;
+ clear_method_cache_by_id_in_class(iclass, mid);
+}
+
+static void
+clear_iclass_method_cache_by_id_for_refinements(VALUE klass, VALUE d)
+{
+ if (RB_TYPE_P(klass, T_ICLASS)) {
+ ID mid = (ID)d;
+ clear_method_cache_by_id_in_class(klass, mid);
+ }
+}
+
+void
+rb_clear_method_cache(VALUE klass_or_module, ID mid)
+{
+ if (RB_TYPE_P(klass_or_module, T_MODULE)) {
+ VALUE module = klass_or_module; // alias
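+ // a module's subclasses list holds the iclasses of every class that includes it; clear the cache in each of them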
+
+ if (FL_TEST(module, RMODULE_IS_REFINEMENT)) {
+ VALUE refined_class = rb_refinement_module_get_refined_class(module);
+ rb_clear_method_cache(refined_class, mid);
+ rb_class_foreach_subclass(refined_class, clear_iclass_method_cache_by_id_for_refinements, mid);
+ }
+ rb_class_foreach_subclass(module, clear_iclass_method_cache_by_id, mid);
+ }
+ else {
+ clear_method_cache_by_id_in_class(klass_or_module, mid);
+ }
+}
+
+// gc.c
+void rb_cc_table_free(VALUE klass);
+
+static int
+invalidate_all_cc(void *vstart, void *vend, size_t stride, void *data)
+{
+ VALUE v = (VALUE)vstart;
+ for (; v != (VALUE)vend; v += stride) {
+ if (RBASIC(v)->flags) { // liveness check
+ if (RB_TYPE_P(v, T_CLASS) ||
+ RB_TYPE_P(v, T_ICLASS)) {
+ if (RCLASS_CC_TBL(v)) {
+ rb_cc_table_free(v);
+ }
+ RCLASS_CC_TBL(v) = NULL;
+ }
+ }
}
+ return 0; // continue iteration
+}
+
+void
+rb_clear_method_cache_all(void)
+{
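+ // walk the whole heap and drop the per-class cc tables (pCMC) of every T_CLASS/T_ICLASS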
+ rb_objspace_each_objects(invalidate_all_cc, NULL);
}
VALUE
@@ -138,7 +277,7 @@ rb_add_method_cfunc(VALUE klass, ID mid, VALUE (*func)(ANYARGS), int argc, rb_me
rb_method_cfunc_t opt;
opt.func = func;
opt.argc = argc;
- rb_add_method(klass, mid, VM_METHOD_TYPE_CFUNC, &opt, visi);
+ rb_add_method(klass, mid, VM_METHOD_TYPE_CFUNC, &opt, visi);
}
else {
rb_define_notimplement_method_id(klass, mid, visi);
@@ -161,8 +300,13 @@ rb_method_definition_release(rb_method_definition_t *def, int complemented)
xfree(def);
}
else {
- if (complemented) def->complemented_count--;
- else if (def->alias_count > 0) def->alias_count--;
+ if (complemented) {
+ VM_ASSERT(def->complemented_count > 0);
+ def->complemented_count--;
+ }
+ else if (def->alias_count > 0) {
+ def->alias_count--;
+ }
if (METHOD_DEBUG) fprintf(stderr, "-%p-%s:%d->%d,%d->%d (dec)\n", (void *)def, rb_id2name(def->original_id),
alias_count, def->alias_count, complemented_count, def->complemented_count);
@@ -179,20 +323,6 @@ rb_free_method_entry(const rb_method_entry_t *me)
static inline rb_method_entry_t *search_method(VALUE klass, ID id, VALUE *defined_class_ptr);
extern int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
-static inline rb_method_entry_t *
-lookup_method_table(VALUE klass, ID id)
-{
- st_data_t body;
- struct rb_id_table *m_tbl = RCLASS_M_TBL(klass);
-
- if (rb_id_table_lookup(m_tbl, id, &body)) {
- return (rb_method_entry_t *) body;
- }
- else {
- return 0;
- }
-}
-
static VALUE
(*call_cfunc_invoker_func(int argc))(VALUE recv, int argc, const VALUE *, VALUE (*func)(ANYARGS))
{
@@ -406,7 +536,11 @@ const rb_method_entry_t *
rb_method_entry_clone(const rb_method_entry_t *src_me)
{
rb_method_entry_t *me = rb_method_entry_alloc(src_me->called_id, src_me->owner, src_me->defined_class,
- method_definition_addref(src_me->def));
+ method_definition_addref(src_me->def));
+ if (METHOD_ENTRY_COMPLEMENTED(src_me)) {
+ method_definition_addref_complement(src_me->def);
+ }
+
METHOD_ENTRY_FLAGS_COPY(me, src_me);
return me;
}
@@ -487,6 +621,20 @@ make_method_entry_refined(VALUE owner, rb_method_entry_t *me)
}
}
+static inline rb_method_entry_t *
+lookup_method_table(VALUE klass, ID id)
+{
+ st_data_t body;
+ struct rb_id_table *m_tbl = RCLASS_M_TBL(klass);
+
+ if (rb_id_table_lookup(m_tbl, id, &body)) {
+ return (rb_method_entry_t *) body;
+ }
+ else {
+ return 0;
+ }
+}
+
void
rb_add_refined_method_entry(VALUE refined_class, ID mid)
{
@@ -494,7 +642,7 @@ rb_add_refined_method_entry(VALUE refined_class, ID mid)
if (me) {
make_method_entry_refined(refined_class, me);
- rb_clear_method_cache_by_class(refined_class);
+ rb_clear_method_cache(refined_class, mid);
}
else {
rb_add_method(refined_class, mid, VM_METHOD_TYPE_REFINED, 0, METHOD_VISI_PUBLIC);
@@ -615,7 +763,7 @@ rb_method_entry_make(VALUE klass, ID mid, VALUE defined_class, rb_method_visibil
if (def == NULL) def = rb_method_definition_create(type, original_id);
rb_method_definition_set(me, def, opts);
- rb_clear_method_cache_by_class(klass);
+ rb_clear_method_cache(klass, mid);
/* check mid */
if (klass == rb_cObject) {
@@ -737,149 +885,169 @@ rb_get_alloc_func(VALUE klass)
return 0;
}
+const rb_method_entry_t *
+rb_method_entry_at(VALUE klass, ID id)
+{
+ return lookup_method_table(klass, id);
+}
+
static inline rb_method_entry_t*
search_method(VALUE klass, ID id, VALUE *defined_class_ptr)
{
rb_method_entry_t *me;
+ RB_DEBUG_COUNTER_INC(mc_search);
+
for (; klass; klass = RCLASS_SUPER(klass)) {
RB_DEBUG_COUNTER_INC(mc_search_super);
- if ((me = lookup_method_table(klass, id)) != 0) break;
+ if ((me = lookup_method_table(klass, id)) != 0) {
+ break;
+ }
}
- if (defined_class_ptr)
- *defined_class_ptr = klass;
+ if (defined_class_ptr) *defined_class_ptr = klass;
+
+ if (me == NULL) RB_DEBUG_COUNTER_INC(mc_search_notfound);
+
+ VM_ASSERT(me == NULL || !METHOD_ENTRY_INVALIDATED(me));
return me;
}
-const rb_method_entry_t *
-rb_method_entry_at(VALUE klass, ID id)
+static rb_method_entry_t *
+search_method_protect(VALUE klass, ID id, VALUE *defined_class_ptr)
{
- return lookup_method_table(klass, id);
+ rb_method_entry_t *me = search_method(klass, id, defined_class_ptr);
+
+ if (!UNDEFINED_METHOD_ENTRY_P(me)) {
+ return me;
+ }
+ else {
+ return NULL;
+ }
}
-/*
- * search method entry without the method cache.
- *
- * if you need method entry with method cache (normal case), use
- * rb_method_entry() simply.
- */
-static rb_method_entry_t *
-method_entry_get_without_cache(VALUE klass, ID id,
- VALUE *defined_class_ptr)
+MJIT_FUNC_EXPORTED const rb_method_entry_t *
+rb_method_entry(VALUE klass, ID id)
{
- VALUE defined_class;
- rb_method_entry_t *me = search_method(klass, id, &defined_class);
+ return search_method(klass, id, NULL);
+}
- if (ruby_running) {
- if (OPT_GLOBAL_METHOD_CACHE) {
- struct cache_entry *ent;
- ent = GLOBAL_METHOD_CACHE(klass, id);
- ent->class_serial = RCLASS_SERIAL(klass);
- ent->method_state = GET_GLOBAL_METHOD_STATE();
- ent->defined_class = defined_class;
- ent->mid = id;
+static inline const rb_callable_method_entry_t *
+prepare_callable_method_entry(VALUE defined_class, ID id, const rb_method_entry_t * const me, int create)
+{
+ struct rb_id_table *mtbl;
+ const rb_callable_method_entry_t *cme;
- if (UNDEFINED_METHOD_ENTRY_P(me)) {
- me = ent->me = NULL;
- }
- else {
- ent->me = me;
- }
- }
- else if (UNDEFINED_METHOD_ENTRY_P(me)) {
- me = NULL;
- }
+ if (me) {
+ if (me->defined_class == 0) {
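+ // this me is not bound to a class yet (e.g. it comes from a module); build or reuse a complemented cme for defined_class, memoized in callable_m_tbl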
+ RB_DEBUG_COUNTER_INC(mc_cme_complement);
+ VM_ASSERT(RB_TYPE_P(defined_class, T_ICLASS) || RB_TYPE_P(defined_class, T_MODULE));
+ VM_ASSERT(me->defined_class == 0);
+
+ mtbl = RCLASS_CALLABLE_M_TBL(defined_class);
+
+ if (mtbl && rb_id_table_lookup(mtbl, id, (VALUE *)&cme)) {
+ RB_DEBUG_COUNTER_INC(mc_cme_complement_hit);
+ VM_ASSERT(callable_method_entry_p(cme));
+ VM_ASSERT(!METHOD_ENTRY_INVALIDATED(cme));
+ }
+ else if (create) {
+ if (!mtbl) {
+ mtbl = RCLASS_EXT(defined_class)->callable_m_tbl = rb_id_table_create(0);
+ }
+ cme = rb_method_entry_complement_defined_class(me, me->called_id, defined_class);
+ rb_id_table_insert(mtbl, id, (VALUE)cme);
+ VM_ASSERT(callable_method_entry_p(cme));
+ }
+ else {
+ return NULL;
+ }
+ }
+ else {
+ cme = (const rb_callable_method_entry_t *)me;
+ VM_ASSERT(callable_method_entry_p(cme));
+ VM_ASSERT(!METHOD_ENTRY_INVALIDATED(cme));
+ }
+ return cme;
}
- else if (UNDEFINED_METHOD_ENTRY_P(me)) {
- me = NULL;
+ else {
+ return NULL;
}
+}
- if (defined_class_ptr)
- *defined_class_ptr = defined_class;
- return me;
+static const rb_callable_method_entry_t *
+complemented_callable_method_entry(VALUE klass, ID id)
+{
+ VALUE defined_class;
+ rb_method_entry_t *me = search_method_protect(klass, id, &defined_class);
+ return prepare_callable_method_entry(defined_class, id, me, FALSE);
}
-static void
-verify_method_cache(VALUE klass, ID id, VALUE defined_class, rb_method_entry_t *me)
+static const rb_callable_method_entry_t *
+cached_callable_method_entry(VALUE klass, ID mid)
{
- if (!VM_DEBUG_VERIFY_METHOD_CACHE) return;
- VALUE actual_defined_class;
- rb_method_entry_t *actual_me =
- method_entry_get_without_cache(klass, id, &actual_defined_class);
+ struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
+ struct rb_class_cc_entries *ccs;
- if (me != actual_me || defined_class != actual_defined_class) {
- rb_bug("method cache verification failed");
+ if (cc_tbl && rb_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
+ VM_ASSERT(vm_ccs_p(ccs));
+
+ if (LIKELY(!METHOD_ENTRY_INVALIDATED(ccs->cme))) {
+ VM_ASSERT(ccs->cme->called_id == mid);
+ RB_DEBUG_COUNTER_INC(ccs_found);
+ return ccs->cme;
+ }
+ else {
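+ // the cached cme has been invalidated (its method was redefined or removed); free the stale ccs and drop the table entry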
+ rb_vm_ccs_free(ccs);
+ rb_id_table_delete(cc_tbl, mid);
+ }
}
+ return NULL;
}
-static rb_method_entry_t *
-method_entry_get(VALUE klass, ID id, VALUE *defined_class_ptr)
+static void
+cache_callable_method_entry(VALUE klass, ID mid, const rb_callable_method_entry_t *cme)
{
- struct cache_entry *ent;
- if (!OPT_GLOBAL_METHOD_CACHE) goto nocache;
- ent = GLOBAL_METHOD_CACHE(klass, id);
- if (ent->method_state == GET_GLOBAL_METHOD_STATE() &&
- ent->class_serial == RCLASS_SERIAL(klass) &&
- ent->mid == id) {
- verify_method_cache(klass, id, ent->defined_class, ent->me);
- if (defined_class_ptr) *defined_class_ptr = ent->defined_class;
- RB_DEBUG_COUNTER_INC(mc_global_hit);
- return ent->me;
- }
+ struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
+ struct rb_class_cc_entries *ccs;
- nocache:
- RB_DEBUG_COUNTER_INC(mc_global_miss);
- return method_entry_get_without_cache(klass, id, defined_class_ptr);
-}
+ if (!cc_tbl) {
+ cc_tbl = RCLASS_CC_TBL(klass) = rb_id_table_create(2);
+ }
-MJIT_FUNC_EXPORTED const rb_method_entry_t *
-rb_method_entry(VALUE klass, ID id)
-{
- return method_entry_get(klass, id, NULL);
+ if (rb_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) {
+ VM_ASSERT(ccs->cme == cme);
+ }
+ else {
+ ccs = vm_ccs_create(klass, cme);
+ rb_id_table_insert(cc_tbl, mid, (VALUE)ccs);
+ }
}
static const rb_callable_method_entry_t *
-prepare_callable_method_entry(VALUE defined_class, ID id, const rb_method_entry_t *me)
+callable_method_entry(VALUE klass, ID mid, VALUE *defined_class_ptr)
{
- struct rb_id_table *mtbl;
- const rb_callable_method_entry_t *cme;
+ VM_ASSERT(RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS));
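+ // fast path: consult the per-Class method cache (pCMC) first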
+ const rb_callable_method_entry_t *cme = cached_callable_method_entry(klass, mid);
- if (me && me->defined_class == 0) {
- RB_DEBUG_COUNTER_INC(mc_cme_complement);
- VM_ASSERT(RB_TYPE_P(defined_class, T_ICLASS) || RB_TYPE_P(defined_class, T_MODULE));
- VM_ASSERT(me->defined_class == 0);
-
- mtbl = RCLASS_CALLABLE_M_TBL(defined_class);
-
- if (mtbl && rb_id_table_lookup(mtbl, id, (VALUE *)&me)) {
- RB_DEBUG_COUNTER_INC(mc_cme_complement_hit);
- cme = (rb_callable_method_entry_t *)me;
- VM_ASSERT(callable_method_entry_p(cme));
- }
- else {
- if (!mtbl) {
- mtbl = RCLASS_EXT(defined_class)->callable_m_tbl = rb_id_table_create(0);
- }
- cme = rb_method_entry_complement_defined_class(me, me->called_id, defined_class);
- rb_id_table_insert(mtbl, id, (VALUE)cme);
- VM_ASSERT(callable_method_entry_p(cme));
- }
+ if (cme) {
+ if (defined_class_ptr != NULL) *defined_class_ptr = cme->defined_class;
}
else {
- cme = (const rb_callable_method_entry_t *)me;
- VM_ASSERT(callable_method_entry_p(cme));
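+ // slow path: search the ancestry, build a callable cme, then cache it in the pCMC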
+ VALUE defined_class;
+ rb_method_entry_t *me = search_method_protect(klass, mid, &defined_class);
+ if (defined_class_ptr) *defined_class_ptr = defined_class;
+ cme = prepare_callable_method_entry(defined_class, mid, me, TRUE);
+ if (cme) cache_callable_method_entry(klass, mid, cme);
}
return cme;
}
MJIT_FUNC_EXPORTED const rb_callable_method_entry_t *
-rb_callable_method_entry(VALUE klass, ID id)
+rb_callable_method_entry(VALUE klass, ID mid)
{
- VALUE defined_class;
- rb_method_entry_t *me = method_entry_get(klass, id, &defined_class);
- return prepare_callable_method_entry(defined_class, id, me);
+ return callable_method_entry(klass, mid, NULL);
}
static const rb_method_entry_t *resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *defined_class_ptr);
@@ -887,7 +1055,7 @@ static const rb_method_entry_t *resolve_refined_method(VALUE refinements, const
static const rb_method_entry_t *
method_entry_resolve_refinement(VALUE klass, ID id, int with_refinement, VALUE *defined_class_ptr)
{
- const rb_method_entry_t *me = method_entry_get(klass, id, defined_class_ptr);
+ const rb_method_entry_t *me = search_method_protect(klass, id, defined_class_ptr);
if (me) {
if (me->def->type == VM_METHOD_TYPE_REFINED) {
@@ -916,9 +1084,15 @@ rb_method_entry_with_refinements(VALUE klass, ID id, VALUE *defined_class_ptr)
MJIT_FUNC_EXPORTED const rb_callable_method_entry_t *
rb_callable_method_entry_with_refinements(VALUE klass, ID id, VALUE *defined_class_ptr)
{
- VALUE defined_class, *dcp = defined_class_ptr ? defined_class_ptr : &defined_class;
- const rb_method_entry_t *me = method_entry_resolve_refinement(klass, id, TRUE, dcp);
- return prepare_callable_method_entry(*dcp, id, me);
+ const rb_callable_method_entry_t *cme = callable_method_entry(klass, id, defined_class_ptr);
+ if (cme == NULL || cme->def->type != VM_METHOD_TYPE_REFINED) {
+ return cme;
+ }
+ else {
+ VALUE defined_class, *dcp = defined_class_ptr ? defined_class_ptr : &defined_class;
+ const rb_method_entry_t *me = method_entry_resolve_refinement(klass, id, TRUE, dcp);
+ return prepare_callable_method_entry(*dcp, id, me, TRUE);
+ }
}
const rb_method_entry_t *
@@ -932,7 +1106,7 @@ rb_callable_method_entry_without_refinements(VALUE klass, ID id, VALUE *defined_
{
VALUE defined_class, *dcp = defined_class_ptr ? defined_class_ptr : &defined_class;
const rb_method_entry_t *me = method_entry_resolve_refinement(klass, id, FALSE, dcp);
- return prepare_callable_method_entry(*dcp, id, me);
+ return prepare_callable_method_entry(*dcp, id, me, TRUE);
}
static const rb_method_entry_t *
@@ -945,7 +1119,7 @@ resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *de
refinement = find_refinement(refinements, me->owner);
if (!NIL_P(refinement)) {
- tmp_me = method_entry_get(refinement, me->called_id, defined_class_ptr);
+ tmp_me = search_method_protect(refinement, me->called_id, defined_class_ptr);
if (tmp_me && tmp_me->def->type != VM_METHOD_TYPE_REFINED) {
return tmp_me;
@@ -963,7 +1137,7 @@ resolve_refined_method(VALUE refinements, const rb_method_entry_t *me, VALUE *de
return 0;
}
- me = method_entry_get(super, me->called_id, defined_class_ptr);
+ me = search_method_protect(super, me->called_id, defined_class_ptr);
}
return me;
}
@@ -1010,10 +1184,10 @@ remove_method(VALUE klass, ID mid)
klass, ID2SYM(mid));
}
+ rb_clear_method_cache(klass, mid);
rb_id_table_delete(RCLASS_M_TBL(klass), mid);
rb_vm_check_redefinition_opt_method(me, klass);
- rb_clear_method_cache_by_class(klass);
if (me->def->type == VM_METHOD_TYPE_REFINED) {
rb_add_refined_method_entry(klass, mid);
@@ -1069,6 +1243,7 @@ rb_export_method(VALUE klass, ID name, rb_method_visibility_t visi)
VALUE origin_class = RCLASS_ORIGIN(klass);
me = search_method(origin_class, name, &defined_class);
+
if (!me && RB_TYPE_P(klass, T_MODULE)) {
me = search_method(rb_cObject, name, &defined_class);
}
@@ -1087,7 +1262,7 @@ rb_export_method(VALUE klass, ID name, rb_method_visibility_t visi)
if (me->def->type == VM_METHOD_TYPE_REFINED && me->def->body.refined.orig_me) {
METHOD_ENTRY_VISI_SET((rb_method_entry_t *)me->def->body.refined.orig_me, visi);
}
- rb_clear_method_cache_by_class(klass);
+ rb_clear_method_cache(klass, name);
}
else {
rb_add_method(klass, name, VM_METHOD_TYPE_ZSUPER, 0, visi);
@@ -1110,8 +1285,8 @@ rb_method_boundp(VALUE klass, ID id, int ex)
me = rb_method_entry_without_refinements(klass, id, NULL);
}
- if (me != 0) {
- if ((ex & ~BOUND_RESPONDS) &&
+ if (me != NULL) {
+ if ((ex & ~BOUND_RESPONDS) &&
((METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE) ||
((ex & BOUND_RESPONDS) && (METHOD_ENTRY_VISI(me) == METHOD_VISI_PROTECTED)))) {
return 0;
@@ -1593,6 +1768,7 @@ rb_alias(VALUE klass, ID alias_name, ID original_name)
again:
orig_me = search_method(klass, original_name, &defined_class);
+
if (orig_me && orig_me->def->type == VM_METHOD_TYPE_REFINED) {
orig_me = rb_resolve_refined_method(Qnil, orig_me);
}
@@ -1841,7 +2017,7 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
!me->def->body.iseq.iseqptr->body->param.flags.has_kw &&
!me->def->body.iseq.iseqptr->body->param.flags.has_kwrest) {
me->def->body.iseq.iseqptr->body->param.flags.ruby2_keywords = 1;
- rb_clear_method_cache_by_class(module);
+ rb_clear_method_cache(module, name);
}
else {
rb_warn("Skipping set of ruby2_keywords flag for %s (method accepts keywords or method does not accept argument splat)", rb_id2name(name));
@@ -1860,7 +2036,7 @@ rb_mod_ruby2_keywords(int argc, VALUE *argv, VALUE module)
!iseq->body->param.flags.has_kw &&
!iseq->body->param.flags.has_kwrest) {
iseq->body->param.flags.ruby2_keywords = 1;
- rb_clear_method_cache_by_class(module);
+ rb_clear_method_cache(module, name);
}
else {
rb_warn("Skipping set of ruby2_keywords flag for %s (method accepts keywords or method does not accept argument splat)", rb_id2name(name));
@@ -2061,10 +2237,10 @@ rb_mod_modfunc(int argc, VALUE *argv, VALUE module)
int
rb_method_basic_definition_p(VALUE klass, ID id)
{
- const rb_method_entry_t *me;
+ const rb_callable_method_entry_t *cme;
if (!klass) return TRUE; /* hidden object cannot be overridden */
- me = rb_method_entry(klass, id);
- return (me && METHOD_ENTRY_BASIC(me)) ? TRUE : FALSE;
+ cme = rb_callable_method_entry(klass, id);
+ return (cme && METHOD_ENTRY_BASIC(cme)) ? TRUE : FALSE;
}
#ifdef __GNUC__
#pragma pop_macro("rb_method_basic_definition_p")
@@ -2072,10 +2248,8 @@ rb_method_basic_definition_p(VALUE klass, ID id)
static VALUE
call_method_entry(rb_execution_context_t *ec, VALUE defined_class, VALUE obj, ID id,
- const rb_method_entry_t *me, int argc, const VALUE *argv, int kw_splat)
+ const rb_callable_method_entry_t *cme, int argc, const VALUE *argv, int kw_splat)
{
- const rb_callable_method_entry_t *cme =
- prepare_callable_method_entry(defined_class, id, me);
VALUE passed_block_handler = vm_passed_block_handler(ec);
VALUE result = rb_vm_call_kw(ec, obj, id, argc, argv, cme, kw_splat);
vm_passed_block_handler_set(ec, passed_block_handler);
@@ -2088,13 +2262,12 @@ basic_obj_respond_to_missing(rb_execution_context_t *ec, VALUE klass, VALUE obj,
{
VALUE defined_class, args[2];
const ID rtmid = idRespond_to_missing;
- const rb_method_entry_t *const me =
- method_entry_get(klass, rtmid, &defined_class);
+ const rb_callable_method_entry_t *const cme = callable_method_entry(klass, rtmid, &defined_class);
- if (!me || METHOD_ENTRY_BASIC(me)) return Qundef;
+ if (!cme || METHOD_ENTRY_BASIC(cme)) return Qundef;
args[0] = mid;
args[1] = priv;
- return call_method_entry(ec, defined_class, obj, rtmid, me, 2, args, RB_NO_KEYWORDS);
+ return call_method_entry(ec, defined_class, obj, rtmid, cme, 2, args, RB_NO_KEYWORDS);
}
static inline int
@@ -2120,11 +2293,10 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
{
VALUE defined_class;
const ID resid = idRespond_to;
- const rb_method_entry_t *const me =
- method_entry_get(klass, resid, &defined_class);
+ const rb_callable_method_entry_t *const cme = callable_method_entry(klass, resid, &defined_class);
- if (!me) return -1;
- if (METHOD_ENTRY_BASIC(me)) {
+ if (!cme) return -1;
+ if (METHOD_ENTRY_BASIC(cme)) {
return -1;
}
else {
@@ -2135,7 +2307,7 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
args[0] = ID2SYM(id);
args[1] = Qtrue;
if (priv) {
- argc = rb_method_entry_arity(me);
+ argc = rb_method_entry_arity((const rb_method_entry_t *)cme);
if (argc > 2) {
rb_raise(rb_eArgError,
"respond_to? must accept 1 or 2 arguments (requires %d)",
@@ -2145,7 +2317,7 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
argc = 2;
}
else if (!NIL_P(ruby_verbose)) {
- VALUE location = rb_method_entry_location(me);
+ VALUE location = rb_method_entry_location((const rb_method_entry_t *)cme);
rb_warn("%"PRIsVALUE"%c""respond_to?(:%"PRIsVALUE") uses"
" the deprecated method signature, which takes one parameter",
(FL_TEST(klass, FL_SINGLETON) ? obj : klass),
@@ -2161,7 +2333,7 @@ vm_respond_to(rb_execution_context_t *ec, VALUE klass, VALUE obj, ID id, int pri
}
}
}
- result = call_method_entry(ec, defined_class, obj, resid, me, argc, args, RB_NO_KEYWORDS);
+ result = call_method_entry(ec, defined_class, obj, resid, cme, argc, args, RB_NO_KEYWORDS);
return RTEST(result);
}
}
@@ -2246,25 +2418,7 @@ obj_respond_to_missing(VALUE obj, VALUE mid, VALUE priv)
void
Init_Method(void)
{
- if (!OPT_GLOBAL_METHOD_CACHE) return;
- char *ptr = getenv("RUBY_GLOBAL_METHOD_CACHE_SIZE");
- int val;
-
- if (ptr != NULL && (val = atoi(ptr)) > 0) {
- if ((val & (val - 1)) == 0) { /* ensure val is a power of 2 */
- global_method_cache.size = val;
- global_method_cache.mask = val - 1;
- }
- else {
- fprintf(stderr, "RUBY_GLOBAL_METHOD_CACHE_SIZE was set to %d but ignored because the value is not a power of 2.\n", val);
- }
- }
-
- global_method_cache.entries = (struct cache_entry *)calloc(global_method_cache.size, sizeof(struct cache_entry));
- if (global_method_cache.entries == NULL) {
- fprintf(stderr, "[FATAL] failed to allocate memory\n");
- exit(EXIT_FAILURE);
- }
+ //
}
void