From d5ec9ec308dccaeea2a723e070a98df4159183de Mon Sep 17 00:00:00 2001
From: ko1
Date: Sat, 19 Sep 2015 17:59:58 +0000
Subject: * vm_core.h: split rb_call_info_t into several structs.

  * rb_call_info (ci) holds fixed, compile-time information.

  * if ci->flag & VM_CALL_KWARG, then the rb_call_info is also an
    rb_call_info_with_kwarg. This technique saves one word for the
    majority of rb_call_info entries, which carry no keywords.

  * rb_calling_info holds temporary data (argc, blockptr, recv) for
    each method dispatch. This data is allocated only on the machine
    stack.

  * rb_call_cache is the inline method cache.

  Before this patch, only rb_call_info_t data was passed; after this
  patch, the three structs above are passed.

  This patch improves:

  * data locality (rb_call_info is now read-only data).
  * memory consumption (rb_call_info_with_kwarg is allocated only when
    needed, and rb_calling_info is never heap-allocated).

* compile.c: use the above data.
* insns.def: ditto.
* iseq.c: ditto.
* vm_args.c: ditto.
* vm_eval.c: ditto.
* vm_insnhelper.c: ditto.
* vm_insnhelper.h: ditto.

* iseq.h: add iseq_compile_data::ci_index and
  iseq_compile_data::ci_kw_index.

* tool/instruction.rb: introduce the TS_CALLCACHE operand type.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51903 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
---
 iseq.c | 48 +++++++++++++++++++++++++++++-------------------
 1 file changed, 29 insertions(+), 19 deletions(-)

(limited to 'iseq.c')
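Note: as a rough map of the layout described above, here is a simplified,
self-contained sketch. It is not the actual vm_core.h definition set:
VALUE, ID, the VM_CALL_KWARG bit, and blockptr's type are stand-ins so
that the example compiles on its own, and rb_call_cache's inline-cache
fields are omitted entirely.

    #include <stdio.h>

    /* Stand-ins for Ruby-internal scalar types; for illustration only. */
    typedef unsigned long VALUE;
    typedef unsigned long ID;

    /* Placeholder bit; the real flag value is defined in vm_core.h. */
    #define VM_CALL_KWARG 0x01

    /* Fixed, compile-time call information (read-only after compilation). */
    struct rb_call_info {
        ID mid;             /* method id */
        unsigned int flag;  /* VM_CALL_* flags */
        int orig_argc;
    };

    /* Keyword table referenced only by call sites that pass keywords. */
    struct rb_call_info_kw_arg {
        int keyword_len;
        VALUE keywords[1];  /* allocated together with the table */
    };

    /* Extended form, valid only when ci->flag & VM_CALL_KWARG is set, so
     * the common (non-keyword) case does not pay for the kw_arg pointer. */
    struct rb_call_info_with_kwarg {
        struct rb_call_info ci;
        struct rb_call_info_kw_arg *kw_arg;
    };

    /* Per-dispatch temporaries; lives on the machine stack, never the heap. */
    struct rb_calling_info {
        void *blockptr;     /* rb_block_t * in the real code */
        VALUE recv;
        int argc;
    };

    /* The flag-gated downcast used throughout the patch (see the hunks below). */
    static int
    call_keyword_len(const struct rb_call_info *ci)
    {
        if (ci->flag & VM_CALL_KWARG) {
            const struct rb_call_info_with_kwarg *ci_kw =
                (const struct rb_call_info_with_kwarg *)ci;
            return ci_kw->kw_arg->keyword_len;
        }
        return 0;
    }

    int
    main(void)
    {
        struct rb_call_info plain = { 0, 0, 2 };

        /* Only call sites with keywords carry the extra word. */
        printf("sizeof(struct rb_call_info)            = %zu\n",
               sizeof(struct rb_call_info));
        printf("sizeof(struct rb_call_info_with_kwarg) = %zu\n",
               sizeof(struct rb_call_info_with_kwarg));
        printf("keyword_len of a plain call: %d\n", call_keyword_len(&plain));
        return 0;
    }

The iseq.c hunks below are the bookkeeping side of this layout: ci_entries
stores ci_size plain entries followed by ci_kw_size keyword-carrying
entries, which is why both the free loop and the memsize loop start at
&ci_entries[ci_size] and iterate ci_kw_size times.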
diff --git a/iseq.c b/iseq.c
index de09949e5e..d3e918d1bf 100644
--- a/iseq.c
+++ b/iseq.c
@@ -74,14 +74,14 @@ rb_iseq_free(const rb_iseq_t *iseq)
     ruby_xfree((void *)iseq->body->local_table);
     ruby_xfree((void *)iseq->body->is_entries);
 
-    if (iseq->body->callinfo_entries) {
+    if (iseq->body->ci_entries) {
         unsigned int i;
-        for (i=0; i<iseq->body->callinfo_size; i++) {
-            /* TODO: revisit callinfo data structure */
-            const rb_call_info_kw_arg_t *kw_arg = iseq->body->callinfo_entries[i].kw_arg;
+        struct rb_call_info_with_kwarg *ci_kw_entries = (struct rb_call_info_with_kwarg *)&iseq->body->ci_entries[iseq->body->ci_size];
+        for (i=0; i<iseq->body->ci_kw_size; i++) {
+            const struct rb_call_info_kw_arg *kw_arg = ci_kw_entries[i].kw_arg;
             ruby_xfree((void *)kw_arg);
         }
-        ruby_xfree(iseq->body->callinfo_entries);
+        ruby_xfree(iseq->body->ci_entries);
     }
     ruby_xfree((void *)iseq->body->catch_table);
     ruby_xfree((void *)iseq->body->param.opt_table);
@@ -161,7 +161,7 @@ iseq_memsize(const rb_iseq_t *iseq)
     }
 
     if (body) {
-        rb_call_info_t *ci_entries = body->callinfo_entries;
+        struct rb_call_info_with_kwarg *ci_kw_entries = (struct rb_call_info_with_kwarg *)&body->ci_entries[body->ci_size];
 
         size += sizeof(struct rb_iseq_constant_body);
         size += body->iseq_size * sizeof(VALUE);
@@ -173,13 +173,14 @@ iseq_memsize(const rb_iseq_t *iseq)
         size += (body->param.opt_num + 1) * sizeof(VALUE);
         size += param_keyword_size(body->param.keyword);
         size += body->is_size * sizeof(union iseq_inline_storage_entry);
-        size += body->callinfo_size * sizeof(rb_call_info_t);
+        size += body->ci_size * sizeof(struct rb_call_info);
+        size += body->ci_kw_size * sizeof(struct rb_call_info_with_kwarg);
 
-        if (ci_entries) {
+        if (ci_kw_entries) {
             unsigned int i;
 
-            for (i = 0; i < body->callinfo_size; i++) {
-                const rb_call_info_kw_arg_t *kw_arg = ci_entries[i].kw_arg;
+            for (i = 0; i < body->ci_kw_size; i++) {
+                const struct rb_call_info_kw_arg *kw_arg = ci_kw_entries[i].kw_arg;
 
                 if (kw_arg) {
                     size += rb_call_info_kw_arg_bytes(kw_arg->keyword_len);
@@ -1267,7 +1268,7 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
 
       case TS_CALLINFO:
         {
-            rb_call_info_t *ci = (rb_call_info_t *)op;
+            struct rb_call_info *ci = (struct rb_call_info *)op;
             VALUE ary = rb_ary_new();
 
             if (ci->mid) {
@@ -1276,8 +1277,8 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
 
             rb_ary_push(ary, rb_sprintf("argc:%d", ci->orig_argc));
 
-            if (ci->kw_arg) {
-                rb_ary_push(ary, rb_sprintf("kw:%d", ci->kw_arg->keyword_len));
+            if (ci->flag & VM_CALL_KWARG) {
+                rb_ary_push(ary, rb_sprintf("kw:%d", ((struct rb_call_info_with_kwarg *)ci)->kw_arg->keyword_len));
             }
 
             if (ci->flag) {
@@ -1288,6 +1289,7 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
                 if (ci->flag & VM_CALL_VCALL) rb_ary_push(flags, rb_str_new2("VCALL"));
                 if (ci->flag & VM_CALL_TAILCALL) rb_ary_push(flags, rb_str_new2("TAILCALL"));
                 if (ci->flag & VM_CALL_SUPER) rb_ary_push(flags, rb_str_new2("SUPER"));
+                if (ci->flag & VM_CALL_KWARG) rb_ary_push(flags, rb_str_new2("KWARG"));
                 if (ci->flag & VM_CALL_OPT_SEND) rb_ary_push(flags, rb_str_new2("SNED")); /* maybe not reachable */
                 if (ci->flag & VM_CALL_ARGS_SIMPLE) rb_ary_push(flags, rb_str_new2("ARGS_SIMPLE")); /* maybe not reachable */
                 rb_ary_push(ary, rb_ary_join(flags, rb_str_new2("|")));
@@ -1296,6 +1298,10 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
         }
         break;
 
+      case TS_CALLCACHE:
+        ret = rb_str_new2("<callcache>");
+        break;
+
       case TS_CDHASH:
         ret = rb_str_new2("<cdhash>");
         break;
@@ -1883,20 +1889,21 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
                 break;
               case TS_CALLINFO:
                 {
-                    rb_call_info_t *ci = (rb_call_info_t *)*seq;
+                    struct rb_call_info *ci = (struct rb_call_info *)*seq;
                     VALUE e = rb_hash_new();
                     int orig_argc = ci->orig_argc;
 
                     rb_hash_aset(e, ID2SYM(rb_intern("mid")), ci->mid ? ID2SYM(ci->mid) : Qnil);
                     rb_hash_aset(e, ID2SYM(rb_intern("flag")), UINT2NUM(ci->flag));
 
-                    if (ci->kw_arg) {
+                    if (ci->flag & VM_CALL_KWARG) {
+                        struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
                         int i;
-                        VALUE kw = rb_ary_new2((long)ci->kw_arg->keyword_len);
+                        VALUE kw = rb_ary_new2((long)ci_kw->kw_arg->keyword_len);
 
-                        orig_argc -= ci->kw_arg->keyword_len;
-                        for (i = 0; i < ci->kw_arg->keyword_len; i++) {
-                            rb_ary_push(kw, ci->kw_arg->keywords[i]);
+                        orig_argc -= ci_kw->kw_arg->keyword_len;
+                        for (i = 0; i < ci_kw->kw_arg->keyword_len; i++) {
+                            rb_ary_push(kw, ci_kw->kw_arg->keywords[i]);
                         }
                         rb_hash_aset(e, ID2SYM(rb_intern("kw_arg")), kw);
                     }
@@ -1906,6 +1913,9 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
                     rb_ary_push(ary, e);
                 }
                 break;
+              case TS_CALLCACHE:
+                rb_ary_push(ary, Qfalse);
+                break;
               case TS_ID:
                 rb_ary_push(ary, ID2SYM(*seq));
                 break;
--
cgit v1.2.3