path: root/vm_insnhelper.h
author     shyouhei <shyouhei@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2018-12-26 00:59:37 +0000
committer  shyouhei <shyouhei@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>  2018-12-26 00:59:37 +0000
commit     d46ab953765e2114b6f2f58792be9a7a974e5f27 (patch)
tree       893d6b7748b296a17450bc8d4354dd689c4caeb4 /vm_insnhelper.h
parent     686881d383cfe44c67875aada64207c5e0abaa8d (diff)
download   ruby-d46ab953765e2114b6f2f58792be9a7a974e5f27.tar.gz
insns.def: refactor to avoid CALL_METHOD macro
The send instruction and its variants are the most frequently executed paths in the entire process. Reducing macro expansions by folding them into a dedicated function, vm_sendish(), is the main goal of this changeset. It reduces the size of vm_exec_core from 25,552 bytes to 23,728 bytes on my machine. I see no significant slowdown.

Fix: [GH-2056]

vanilla: ruby 2.6.0dev (2018-12-19 trunk 66449) [x86_64-darwin15]
ours: ruby 2.6.0dev (2018-12-19 refactor-send 66449) [x86_64-darwin15] last_commit=insns.def: refactor to avoid CALL_METHOD macro

Calculating -------------------------------------
                           vanilla        ours
   vm2_defined_method       2.645M      2.823M i/s -  6.000M times in 5.109888s 4.783254s
           vm2_method       8.553M      8.873M i/s -  6.000M times in 1.579892s 1.524026s
   vm2_method_missing       3.772M      3.858M i/s -  6.000M times in 3.579482s 3.499220s
vm2_method_with_block       8.494M      8.944M i/s -  6.000M times in 1.589774s 1.509463s
      vm2_poly_method        0.571       0.607 i/s -   1.000  times in 3.947570s 3.733528s
   vm2_poly_method_ov        5.514       5.168 i/s -   1.000  times in 0.408156s 0.436169s
 vm3_clearmethodcache        2.875       2.837 i/s -   1.000  times in 0.783018s 0.793493s

Comparison:
             vm2_defined_method
                   ours:   2822555.4 i/s
                vanilla:   2644878.1 i/s - 1.07x  slower

                     vm2_method
                   ours:   8872947.8 i/s
                vanilla:   8553433.1 i/s - 1.04x  slower

             vm2_method_missing
                   ours:   3858192.3 i/s
                vanilla:   3772296.3 i/s - 1.02x  slower

          vm2_method_with_block
                   ours:   8943825.1 i/s
                vanilla:   8493955.0 i/s - 1.05x  slower

                vm2_poly_method
                   ours:         0.6 i/s
                vanilla:         0.6 i/s - 1.06x  slower

             vm2_poly_method_ov
                vanilla:         5.5 i/s
                   ours:         5.2 i/s - 1.07x  slower

           vm3_clearmethodcache
                vanilla:         2.9 i/s
                   ours:         2.8 i/s - 1.01x  slower

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@66565 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
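For illustration only, the following is a rough sketch (not the code added by this commit) of how the CALL_METHOD and EXEC_EC_CFP macros removed in the diff below could be folded into a single static function in the spirit of vm_sendish(). Folding the dispatch into one function lets the compiler emit this body once instead of expanding it into every send-family instruction, which is where the vm_exec_core size reduction quoted above comes from. The function name and exact parameter list here are assumptions for the example; the real helper in vm_insnhelper.c differs.

/* Illustrative sketch only -- assumed name and signature.
 * It mirrors the behaviour of the removed CALL_METHOD/EXEC_EC_CFP
 * macro pair as one ordinary function. */
static inline VALUE
sendish_sketch(rb_execution_context_t *ec, rb_control_frame_t *cfp,
               struct rb_calling_info *calling,
               const struct rb_call_info *ci, struct rb_call_cache *cc)
{
    /* Invoke the cached call handler (old CALL_METHOD body). */
    VALUE val = (*cc->call)(ec, cfp, calling, ci, cc);

    if (val != Qundef) {
        return val;  /* the handler produced the result directly */
    }
#ifdef MJIT_HEADER
    /* Old MJIT_HEADER branch of EXEC_EC_CFP: a frame that may catch an
     * exception must go through vm_exec() so the JIT-ed caller frame is
     * not cancelled. */
    if (ec->cfp->iseq->body->catch_except_p) {
        VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
        return vm_exec(ec, TRUE);
    }
    if ((val = mjit_exec(ec)) == Qundef) {
        VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
        val = vm_exec(ec, FALSE);
    }
    return val;
#else
    /* In the interpreter, returning Qundef signals the calling instruction
     * to restore its registers and continue dispatch itself, since
     * RESTORE_REGS()/NEXT_INSN() only make sense inside insns.def. */
    return Qundef;
#endif
}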
Diffstat (limited to 'vm_insnhelper.h')
-rw-r--r--  vm_insnhelper.h  36
1 file changed, 0 insertions, 36 deletions
diff --git a/vm_insnhelper.h b/vm_insnhelper.h
index e44d91ec34..02745de01a 100644
--- a/vm_insnhelper.h
+++ b/vm_insnhelper.h
@@ -137,42 +137,6 @@ enum vm_regan_acttype {
/* deal with control flow 2: method/iterator */
/**********************************************************/
-#ifdef MJIT_HEADER
-/* When calling ISeq which may catch an exception from JIT-ed code, we should not call
- mjit_exec directly to prevent the caller frame from being canceled. That's because
- the caller frame may have stack values in the local variables and the cancelling
- the caller frame will purge them. But directly calling mjit_exec is faster... */
-#define EXEC_EC_CFP(val) do { \
- if (ec->cfp->iseq->body->catch_except_p) { \
- VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH); \
- val = vm_exec(ec, TRUE); \
- } \
- else if ((val = mjit_exec(ec)) == Qundef) { \
- VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH); \
- val = vm_exec(ec, FALSE); \
- } \
-} while (0)
-#else
-/* When calling from VM, longjmp in the callee won't purge any JIT-ed caller frames.
- So it's safe to directly call mjit_exec. */
-#define EXEC_EC_CFP(val) do { \
- if ((val = mjit_exec(ec)) == Qundef) { \
- RESTORE_REGS(); \
- NEXT_INSN(); \
- } \
-} while (0)
-#endif
-
-#define CALL_METHOD(calling, ci, cc) do { \
- VALUE v = (*(cc)->call)(ec, GET_CFP(), (calling), (ci), (cc)); \
- if (v == Qundef) { \
- EXEC_EC_CFP(val); \
- } \
- else { \
- val = v; \
- } \
-} while (0)
-
/* set fastpath when cached method is *NOT* protected
* because inline method cache does not care about receiver.
*/