path: root/vm_callinfo.h
author     Koichi Sasada <ko1@atdot.net>  2020-01-08 16:14:01 +0900
committer  Koichi Sasada <ko1@atdot.net>  2020-02-22 09:58:59 +0900
commit     b9007b6c548f91e88fd3f2ffa23de740431fa969 (patch)
tree       1746393d1c5f704e8dc7e0a458198264062273bf /vm_callinfo.h
parent     f2286925f08406bc857f7b03ad6779a5d61443ae (diff)
download   ruby-b9007b6c548f91e88fd3f2ffa23de740431fa969.tar.gz
Introduce disposable call-cache.
This patch contains several ideas:

(1) Disposable inline method cache (IMC) for race-free inline method caches
    * Make the call-cache (CC) an RVALUE (GC-managed object) and allocate a
      new CC on every cache miss.
    * This allows race-free access from parallel execution, in the same
      spirit as RCU.

(2) Introduce a per-class method cache (pCMC)
    * Unlike the fixed-size global method cache (GMC), the pCMC allows a
      flexible cache size.
    * Caching CCs reduces CC allocation and lets call sites with the same
      call-info (CI) share a CC's fast path.

(3) Invalidate an inline method cache by invalidating the corresponding
    method entries (MEs)
    * Instead of using class serials, we set an "invalidated" flag on the
      method entry itself to represent cache invalidation.
    * Compared with class serials, the impact of method modification
      (add/overwrite/delete) is small.
    * Bumping a class serial invalidates all method caches of the class and
      its subclasses.
    * The proposed approach invalidates only the caches of the single
      affected ME.

See [Feature #16614] for more details.
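To make ideas (1) and (3) concrete, here is a minimal, self-contained C sketch of a disposable call cache. The names and types (method_entry, call_cache, call_site_lookup, search) are illustrative assumptions only, not the CRuby structures introduced by this patch; GC, CC embedding, and synchronization details are omitted.

#include <stdbool.h>
#include <stdlib.h>

/* Illustrative stand-ins, NOT the real CRuby structs. */
struct method_entry {
    const char *name;
    bool invalidated;            /* set when the method is redefined or removed */
};

struct call_cache {
    unsigned long klass;         /* cache key: an opaque id for the receiver's class */
    struct method_entry *cme;    /* cached method entry */
};

/*
 * A call site keeps a pointer to an immutable cache.  On a miss, or when the
 * cached method entry has been invalidated, a *new* cache is allocated instead
 * of mutating the old one in place, so readers running in parallel never
 * observe a half-updated entry; the stale cache is simply left for the GC,
 * in the same spirit as RCU.
 */
static const struct call_cache *
call_site_lookup(const struct call_cache **slot, unsigned long klass,
                 struct method_entry *(*search)(unsigned long klass))
{
    const struct call_cache *cc = *slot;

    if (cc != NULL && cc->klass == klass && !cc->cme->invalidated) {
        return cc;                       /* fast path: valid cache hit */
    }

    struct call_cache *fresh = malloc(sizeof(*fresh));
    fresh->klass = klass;
    fresh->cme = search(klass);          /* slow path: full method lookup */
    *slot = fresh;                       /* publish; the old cc is disposed by GC */
    return fresh;
}

The key property is that a published cache object is never mutated; invalidation happens by flagging the ME or by discarding the CC, which is what the imemo_callcache object added in this diff enables.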
Diffstat (limited to 'vm_callinfo.h')
-rw-r--r--  vm_callinfo.h  235
1 file changed, 226 insertions, 9 deletions
diff --git a/vm_callinfo.h b/vm_callinfo.h
index 33d4f614da..32b0131fa1 100644
--- a/vm_callinfo.h
+++ b/vm_callinfo.h
@@ -75,13 +75,13 @@ struct rb_callinfo {
#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
-#define CI_EMBED_ARGC_MASK ((1UL<<CI_EMBED_ARGC_bits) - 1)
+#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
-#define CI_EMBED_FLAG_MASK ((1UL<<CI_EMBED_FLAG_bits) - 1)
+#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
-#define CI_EMBED_ID_MASK ((1UL<<CI_EMBED_ID_bits) - 1)
+#define CI_EMBED_ID_MASK ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)
-static inline int
+static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
#if USE_EMBED_CI
@@ -89,7 +89,7 @@ vm_ci_packed_p(const struct rb_callinfo *ci)
return 1;
}
else {
- VM_ASSERT(imemo_type_p((VALUE)ci, imemo_callinfo));
+ VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
return 0;
}
#else
@@ -97,6 +97,17 @@ vm_ci_packed_p(const struct rb_callinfo *ci)
#endif
}
+static inline bool
+vm_ci_p(const struct rb_callinfo *ci)
+{
+ if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
@@ -141,7 +152,6 @@ vm_ci_kwarg(const struct rb_callinfo *ci)
}
}
-#if 0 // for debug
static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
@@ -153,7 +163,6 @@ vm_ci_dump(const struct rb_callinfo *ci)
rp(ci);
}
}
-#endif
#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)
@@ -162,12 +171,11 @@ static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
#if USE_EMBED_CI
-
if ((mid & ~CI_EMBED_ID_MASK) == 0 &&
(argc & ~CI_EMBED_ARGC_MASK) == 0 &&
kwarg == NULL) {
VALUE embed_ci =
- 1L |
+ 1L |
((VALUE)argc << CI_EMBED_ARGC_SHFT) |
((VALUE)flag << CI_EMBED_FLAG_SHFT) |
((VALUE)mid << CI_EMBED_ID_SHFT);
@@ -175,8 +183,11 @@ vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinf
return (const struct rb_callinfo *)embed_ci;
}
#endif
+
const bool debug = 0;
if (debug) fprintf(stderr, "%s:%d ", file, line);
+
+ // TODO: dedup
const struct rb_callinfo *ci = (const struct rb_callinfo *)
rb_imemo_new(imemo_callinfo,
(VALUE)mid,
@@ -204,3 +215,209 @@ vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb
RB_DEBUG_COUNTER_INC(ci_runtime);
return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}
+
+typedef VALUE (*vm_call_handler)(
+ struct rb_execution_context_struct *ec,
+ struct rb_control_frame_struct *cfp,
+ struct rb_calling_info *calling,
+ struct rb_call_data *cd);
+
+// imemo_callcache
+
+struct rb_callcache {
+ const VALUE flags;
+
+ /* inline cache: key */
+ const VALUE klass; // intentionally not marked: marking it would prevent
+ // klass from being freed. When klass is collected,
+ // cc will be cleared (cc->klass = 0) at vm_ccs_free().
+
+ /* inline cache: values */
+ const struct rb_callable_method_entry_struct * const cme_;
+ const vm_call_handler call_;
+
+ union {
+ const unsigned int attr_index;
+ const enum method_missing_reason method_missing_reason; /* used by method_missing */
+ } aux_;
+};
+
+#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER0
+
+static inline const struct rb_callcache *
+vm_cc_new(VALUE klass,
+ const struct rb_callable_method_entry_struct *cme,
+ vm_call_handler call)
+{
+ const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);
+ RB_DEBUG_COUNTER_INC(cc_new);
+ return cc;
+}
+
+static inline const struct rb_callcache *
+vm_cc_fill(struct rb_callcache *cc,
+ VALUE klass,
+ const struct rb_callable_method_entry_struct *cme,
+ vm_call_handler call)
+{
+ struct rb_callcache cc_body = {
+ .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
+ .klass = klass,
+ .cme_ = cme,
+ .call_ = call,
+ };
+ MEMCPY(cc, &cc_body, struct rb_callcache, 1);
+ return cc;
+}
+
+static inline bool
+vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc->klass == 0 ||
+ RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
+ return cc->klass == klass;
+}
+
+static inline const struct rb_callable_method_entry_struct *
+vm_cc_cme(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return cc->cme_;
+}
+
+static inline vm_call_handler
+vm_cc_call(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return cc->call_;
+}
+
+static inline unsigned int
+vm_cc_attr_index(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return cc->aux_.attr_index;
+}
+
+static inline unsigned int
+vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return cc->aux_.method_missing_reason;
+}
+
+static inline int
+vm_cc_markable(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ return FL_TEST_RAW(cc, VM_CALLCACHE_UNMARKABLE) == 0;
+}
+
+static inline bool
+vm_cc_valid_p(const struct rb_callcache *cc, VALUE klass)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ if (cc->klass == klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+#ifndef MJIT_HEADER
+extern const struct rb_callcache *vm_empty_cc;
+#else
+extern const struct rb_callcache *rb_vm_empty_cc(void);
+#endif
+
+static inline const struct rb_callcache *
+vm_cc_empty(void)
+{
+#ifndef MJIT_HEADER
+ return vm_empty_cc;
+#else
+ return rb_vm_empty_cc();
+#endif
+}
+
+/* callcache: mutate */
+
+static inline void
+vm_cc_cme_set(const struct rb_callcache *cc, const struct rb_callable_method_entry_struct *cme)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ VM_ASSERT(vm_cc_cme(cc) != NULL);
+ VM_ASSERT(vm_cc_cme(cc)->called_id == cme->called_id);
+ VM_ASSERT(!vm_cc_markable(cc)); // only used for vm_eval.c
+
+ *((const struct rb_callable_method_entry_struct **)&cc->cme_) = cme;
+}
+
+static inline void
+vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ *(vm_call_handler *)&cc->call_ = call;
+}
+
+static inline void
+vm_cc_attr_index_set(const struct rb_callcache *cc, int index)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ *(int *)&cc->aux_.attr_index = index;
+}
+
+static inline void
+vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
+}
+
+static inline void
+vm_cc_invalidate(const struct rb_callcache *cc)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ VM_ASSERT(cc->klass != 0); // should still be enabled (not yet invalidated)
+
+ *(VALUE *)&cc->klass = 0;
+ RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
+}
+
+/* calldata */
+
+struct rb_call_data {
+ const struct rb_callinfo *ci;
+ const struct rb_callcache *cc;
+};
+
+struct rb_class_cc_entries {
+#if VM_CHECK_MODE > 0
+ VALUE debug_sig;
+#endif
+ int capa;
+ int len;
+ const struct rb_callable_method_entry_struct *cme;
+ struct rb_class_cc_entries_entry {
+ const struct rb_callinfo *ci;
+ const struct rb_callcache *cc;
+ } *entries;
+};
+
+#if VM_CHECK_MODE > 0
+static inline bool
+vm_ccs_p(const struct rb_class_cc_entries *ccs)
+{
+ return ccs->debug_sig == ~(VALUE)ccs;
+}
+#endif
+
+// gc.c
+void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);
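As a usage-style illustration of the per-class CC table declared above as struct rb_class_cc_entries (idea (2) in the commit message), here is a hedged, self-contained sketch. The ci/cc/ccs types and the ccs_lookup/make_cc helpers are hypothetical stand-ins; the real lookup logic lives elsewhere in the VM, not in this header.

#include <stdlib.h>

/* Simplified, illustrative types mirroring the shapes above. */
struct ci { unsigned long mid; unsigned int flag; unsigned int argc; };
struct cc { unsigned long klass; const void *cme; };

struct ccs_entry {
    const struct ci *ci;
    const struct cc *cc;
};

struct ccs {
    int capa;
    int len;
    struct ccs_entry *entries;
};

/*
 * Look up a cached CC for this call-info in the class's table; on a miss,
 * create one and append it, so later call sites using the same CI can share
 * the CC's fast path instead of allocating their own.
 */
static const struct cc *
ccs_lookup(struct ccs *ccs, const struct ci *ci,
           const struct cc *(*make_cc)(const struct ci *ci))
{
    for (int i = 0; i < ccs->len; i++) {
        if (ccs->entries[i].ci == ci) {
            return ccs->entries[i].cc;   /* shared CC for this call-info */
        }
    }

    if (ccs->len == ccs->capa) {         /* grow the flexible-size table */
        ccs->capa = ccs->capa ? ccs->capa * 2 : 4;
        ccs->entries = realloc(ccs->entries, ccs->capa * sizeof(ccs->entries[0]));
    }

    const struct cc *cc = make_cc(ci);
    ccs->entries[ccs->len].ci = ci;
    ccs->entries[ccs->len].cc = cc;
    ccs->len++;
    return cc;
}

Unlike the fixed-size global method cache, such a per-class table can grow as needed, and entries are dropped wholesale when the owning class or its MEs are invalidated (see rb_vm_ccs_free above).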