aboutsummaryrefslogtreecommitdiffstats
path: root/mjit.c
diff options
context:
space:
mode:
authorAlan Wu <XrXr@users.noreply.github.com>2019-07-30 21:36:05 -0400
committer卜部昌平 <shyouhei@ruby-lang.org>2019-10-24 18:03:42 +0900
commit89e7997622038f82115f34dbb4ea382e02bed163 (patch)
tree993a5f6fb17418381e835be1fd51093dc620148a /mjit.c
parent38e931fa2ceac6d922f3eabedb8f35f211de0bdb (diff)
downloadruby-89e7997622038f82115f34dbb4ea382e02bed163.tar.gz
Combine call info and cache to speed up method invocation
To perform a regular method call, the VM needs two structs, `rb_call_info` and `rb_call_cache`. At the moment, we allocate these two structures in separate buffers. In the worst case, the CPU needs to read 4 cache lines to complete a method call. Putting the two structures together reduces the maximum number of cache line reads to 2. Combining the structures also saves 8 bytes per call site, as the current layout uses two separate pointers for the call info and the call cache. This saves about 2 MiB on Discourse. This change improves the Optcarrot benchmark by at least 3%. For more details, see the attached bugs.ruby-lang.org ticket. Complications: - A new instruction attribute `comptime_sp_inc` is introduced to calculate the SP increase at compile time without using call caches. At compile time, a `TS_CALLDATA` operand points to a call info struct, but at runtime, the same operand points to a call data struct. Instructions that explicitly define `sp_inc` also need to define `comptime_sp_inc`. - MJIT code for copying call caches becomes slightly more complicated. - This changes the bytecode format, which might break existing tools. [Misc #16258]
Diffstat (limited to 'mjit.c')
-rw-r--r--mjit.c11
1 file changed, 10 insertions, 1 deletion
diff --git a/mjit.c b/mjit.c
index 3ae9410179..f1e3934326 100644
--- a/mjit.c
+++ b/mjit.c
@@ -46,7 +46,16 @@ mjit_copy_job_handler(void *data)
const struct rb_iseq_constant_body *body = job->iseq->body;
if (job->cc_entries) {
- memcpy(job->cc_entries, body->cc_entries, sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size));
+ unsigned int i;
+ struct rb_call_cache *sink = job->cc_entries;
+ const struct rb_call_data *calls = body->call_data;
+ const struct rb_kwarg_call_data *kw_calls = (struct rb_kwarg_call_data *)&body->call_data[body->ci_size];
+ for (i = 0; i < body->ci_size; i++) {
+ *sink++ = calls[i].cc;
+ }
+ for (i = 0; i < body->ci_kw_size; i++) {
+ *sink++ = kw_calls[i].cc;
+ }
}
if (job->is_entries) {
memcpy(job->is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * body->is_size);