path: root/mjit_worker.c
author    Koichi Sasada <ko1@atdot.net>  2020-01-08 16:14:01 +0900
committer Koichi Sasada <ko1@atdot.net>  2020-02-22 09:58:59 +0900
commit    b9007b6c548f91e88fd3f2ffa23de740431fa969 (patch)
tree      1746393d1c5f704e8dc7e0a458198264062273bf /mjit_worker.c
parent    f2286925f08406bc857f7b03ad6779a5d61443ae (diff)
Introduce disposable call-cache.
This patch contains several ideas:

(1) Disposable inline method cache (IMC) for race-free inline method caches
    * Make the call-cache (CC) an RVALUE (a GC-managed object) and
      allocate a new CC on every cache miss.
    * This technique allows race-free access from parallel processing
      elements, similar to RCU.

(2) Introduce a per-class method cache (pCMC)
    * Unlike the fixed-size global method cache (GMC), the pCMC allows a
      flexible cache size.
    * Caching CCs reduces CC allocation and allows sharing a CC's fast
      path between call sites with the same call-info (CI).

(3) Invalidate an inline method cache by invalidating the corresponding
    method entries (MEs)
    * Instead of using class serials, we set an "invalidated" flag on the
      method entry itself to represent cache invalidation.
    * Compared with class serials, the impact of method modification
      (add/overwrite/delete) is small: updating a class serial invalidates
      all method caches of the class and its subclasses, whereas the
      proposed approach invalidates only the caches tied to the one
      modified ME.

See [Feature #16614] for more details.
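To make idea (1) concrete, here is a minimal sketch in plain C under
hypothetical names (me_t, cc_t, cc_ensure, lookup_method are illustrations,
not Ruby's actual internal API): a cache miss allocates a fresh call-cache
object instead of mutating the existing one in place, and validity is checked
via an "invalidated" flag on the cached method entry.

#include <stdbool.h>
#include <stdlib.h>

typedef struct method_entry {   /* hypothetical stand-in for Ruby's ME */
    bool invalidated;           /* set when the method is redefined or removed */
    void (*func)(void);         /* resolved implementation */
} me_t;

typedef struct call_cache {     /* hypothetical stand-in for the CC RVALUE */
    const me_t *me;             /* cached method entry */
} cc_t;

/* Slow-path lookup; in Ruby this walks the class hierarchy. */
extern const me_t *lookup_method(void);

/* Call-site fast path: a valid cache is reused as-is; a miss allocates a
 * brand-new CC instead of mutating the old one, so concurrent readers
 * (e.g. MJIT worker threads) never observe a half-updated entry. */
static const cc_t *
cc_ensure(const cc_t *cc)
{
    if (cc != NULL && cc->me != NULL && !cc->me->invalidated) {
        return cc;                            /* cache hit */
    }
    cc_t *fresh = malloc(sizeof(*fresh));     /* disposable replacement */
    if (fresh == NULL) return NULL;           /* out of memory */
    fresh->me = lookup_method();
    return fresh;
}

In the real patch the CC is a GC target, so a superseded cache is reclaimed
automatically by the garbage collector rather than freed by hand.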
Diffstat (limited to 'mjit_worker.c')
-rw-r--r--  mjit_worker.c | 30 +-----------------------------
1 file changed, 1 insertion(+), 29 deletions(-)
diff --git a/mjit_worker.c b/mjit_worker.c
index ce8133ac7d..85411847d7 100644
--- a/mjit_worker.c
+++ b/mjit_worker.c
@@ -122,32 +122,6 @@ typedef intptr_t pid_t;
#define MJIT_TMP_PREFIX "_ruby_mjit_"
-// The unit structure that holds metadata of ISeq for MJIT.
-struct rb_mjit_unit {
- // Unique order number of unit.
- int id;
- // Dlopen handle of the loaded object file.
- void *handle;
- rb_iseq_t *iseq;
-#ifndef _MSC_VER
- // This value is always set for `compact_all_jit_code`. Also used for lazy deletion.
- char *o_file;
- // true if it's inherited from parent Ruby process and lazy deletion should be skipped.
- // `o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
- // by child for `compact_all_jit_code`.
- bool o_file_inherited_p;
-#endif
-#if defined(_WIN32)
- // DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted.
- char *so_file;
-#endif
- // Only used by unload_units. Flag to check this unit is currently on stack or not.
- char used_code_p;
- struct list_node unode;
- // mjit_compile's optimization switches
- struct rb_mjit_compile_info compile_info;
-};
-
// Linked list of struct rb_mjit_unit.
struct rb_mjit_unit_list {
struct list_head head;
@@ -1117,7 +1091,6 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
typedef struct {
const rb_iseq_t *iseq;
- struct rb_call_cache *cc_entries;
union iseq_inline_storage_entry *is_entries;
bool finish_p;
} mjit_copy_job_t;
@@ -1138,7 +1111,7 @@ int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *);
// We're lazily copying cache values from main thread because these cache values
// could be different between ones on enqueue timing and ones on dequeue timing.
bool
-mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries)
+mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, union iseq_inline_storage_entry *is_entries)
{
mjit_copy_job_t *job = &mjit_copy_job; // just a short hand
@@ -1146,7 +1119,6 @@ mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc
job->finish_p = true; // disable dispatching this job in mjit_copy_job_handler while it's being modified
CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread");
- job->cc_entries = cc_entries;
job->is_entries = is_entries;
CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread");
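
As the comment in the last hunk notes, cache values are copied lazily on the
main thread because they can change between enqueue and dequeue. Below is a
rough pthread-based sketch of that handoff pattern, under hypothetical names
(copy_job_t, enqueue_copy, job_lock; Ruby's actual CRITICAL_SECTION_START and
CRITICAL_SECTION_FINISH macros work differently and are not shown here).

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    void *is_entries;   /* cache data for the main thread to fill in */
    bool finish_p;      /* true while the job must not be dispatched */
} copy_job_t;

static copy_job_t copy_job = { NULL, true };
static pthread_mutex_t job_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker side: mark the job unavailable before touching its fields, then
 * publish it again, so the main thread's handler never dispatches a job
 * that is only half-updated. */
static void
enqueue_copy(void *is_entries)
{
    pthread_mutex_lock(&job_lock);
    copy_job.finish_p = true;        /* disable dispatch during the update */
    copy_job.is_entries = is_entries;
    copy_job.finish_p = false;       /* fields consistent; re-enable dispatch */
    pthread_mutex_unlock(&job_lock);
}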