path: root/tool/ruby_vm/views/_mjit_compile_send.erb
% # -*- C -*-
% # Copyright (c) 2018 Takashi Kokubun.  All rights reserved.
% #
% # This file is a part of  the programming language Ruby.  Permission is hereby
% # granted, to either  redistribute and/or modify this file,  provided that the
% # conditions mentioned  in the  file COPYING  are met.   Consult the  file for
% # details.
%
% # Optimized case of send / opt_send_without_block instructions.
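% #
% # Note on conventions in this template: lines starting with `%` are ERB and
% # run at JIT compile time, plain C lines are compiled into the MJIT compiler
% # itself, and `fprintf(f, ...)` calls emit the C source of the JIT-ed method
% # into the output file `f`.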
{
% # compiler: Prepare operands which may be used by `insn.call_attribute`
% insn.opes.each_with_index do |ope, i|
    MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
% end
% # compiler: Use captured cc to avoid race condition
    const struct rb_callcache *captured_cc = captured_cc_entries(status)[call_data_index(cd, body)];
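% # (The captured entries are snapshots taken for the MJIT worker; the VM may
% # swap cd->cc on a Ruby thread at any time, so only the snapshot is safe to
% # embed in the generated code.)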
%
    const rb_iseq_t *iseq;
    const CALL_INFO ci = cd->ci;
    if (!status->compile_info->disable_send_cache && has_valid_method_type(captured_cc)
        // CC_SET_FASTPATH in vm_callee_setup_arg
        && !(vm_ci_flag(ci) & VM_CALL_TAILCALL) // inlining non-tailcall path
        && vm_cc_cme(captured_cc)->def->type == VM_METHOD_TYPE_ISEQ
        && fastpath_applied_iseq_p(ci, captured_cc, iseq = def_iseq_ptr(vm_cc_cme(captured_cc)->def))) {

        int param_size = iseq->body->param.size;
        int sp_inc = (int)sp_inc_of_sendish(ci);
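% # `stack_size + sp_inc - 1` is the receiver's slot on the simulated stack;
% # the same slot receives the return value after the call.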

        fprintf(f, "{\n");
% # JIT: Declare stack_size for use by macros in _mjit_compile_insn_body.erb
        if (status->local_stack_p) {
            fprintf(f, "    MAYBE_UNUSED(unsigned int) stack_size = %u;\n", b->stack_size);
        }

% # JIT: Cancel the JIT-ed code if the captured call cache is no longer valid (i.e. the call would need vm_search_method again). This guard is what allows the following things to be inlined.
        fprintf(f, "    const struct rb_callcache *cc = (const struct rb_callcache *)0x%"PRIxVALUE";\n", (VALUE)captured_cc);
        fprintf(f, "    const rb_callable_method_entry_t *cc_cme = (const rb_callable_method_entry_t *)0x%"PRIxVALUE";\n", (VALUE)vm_cc_cme(captured_cc));
        fprintf(f, "    if (UNLIKELY(!vm_cc_valid_p(cc, cc_cme, CLASS_OF(stack[%d])))) {\n", b->stack_size + sp_inc - 1);
        fprintf(f, "        reg_cfp->pc = original_body_iseq + %d;\n", pos);
        fprintf(f, "        reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size);
        fprintf(f, "        goto send_cancel;\n");
        fprintf(f, "    }\n");

% # JIT: Move sp and pc if necessary
<%= render 'mjit_compile_pc_and_sp', locals: { insn: insn } -%>

% # JIT: If the ISeq is inlinable, call the inlined method body without pushing a new frame.
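% # The inlined body is emitted elsewhere as a separate `_mjit_inlined_<pos>`
% # function; here we only swap `self` to the receiver around the call.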
        if (status->inlined_iseqs != NULL && status->inlined_iseqs[pos] == iseq->body) {
            fprintf(f, "    {\n");
            fprintf(f, "        VALUE orig_self = reg_cfp->self;\n");
            fprintf(f, "        reg_cfp->self = stack[%d];\n", b->stack_size + sp_inc - 1);
            fprintf(f, "        stack[%d] = _mjit_inlined_%d(ec, reg_cfp, orig_self, original_iseq);\n", b->stack_size + sp_inc - 1, pos);
            fprintf(f, "        reg_cfp->self = orig_self;\n");
            fprintf(f, "    }\n");
        }
        else {
% # JIT: A fork of `vm_sendish`, expanded here so that its parts can be inlined
            fprintf(f, "    {\n");
            fprintf(f, "        VALUE val;\n");
            fprintf(f, "        struct rb_calling_info calling;\n");
% if insn.name == 'send'
            fprintf(f, "        calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, (const struct rb_callinfo *)0x%"PRIxVALUE", (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", (VALUE)ci, (VALUE)blockiseq);
% else
            fprintf(f, "        calling.block_handler = VM_BLOCK_HANDLER_NONE;\n");
% end
            fprintf(f, "        calling.argc = %d;\n", vm_ci_argc(ci));
            fprintf(f, "        calling.recv = stack[%d];\n", b->stack_size + sp_inc - 1);

%           # fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE
            fprintf(f, "        vm_call_iseq_setup_normal(ec, reg_cfp, &calling, cc_cme, 0, %d, %d);\n", param_size, iseq->body->local_table_size);
            if (iseq->body->catch_except_p) {
                fprintf(f, "        VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n");
                fprintf(f, "        val = vm_exec(ec, TRUE);\n");
            }
            else {
                fprintf(f, "        if ((val = mjit_exec(ec)) == Qundef) {\n");
                fprintf(f, "            VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);\n"); // This is vm_call0_body's code after vm_call_iseq_setup
                fprintf(f, "            val = vm_exec(ec, FALSE);\n");
                fprintf(f, "        }\n");
            }
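% # mjit_exec runs the callee's own JIT-ed code if it has any; Qundef means it
% # does not, so mark the pushed frame FINISH and interpret it with vm_exec.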
            fprintf(f, "        stack[%d] = val;\n", b->stack_size + sp_inc - 1);
            fprintf(f, "    }\n");

% # JIT: If TracePoint is enabled, we must evaluate the ISeq as modified for TracePoint, so fall back to the interpreter. Note: this path is slow.
            fprintf(f, "    if (UNLIKELY(!mjit_call_p)) {\n");
            fprintf(f, "        reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size + (int)<%= insn.call_attribute('sp_inc') %>);
            if (!pc_moved_p) {
                fprintf(f, "        reg_cfp->pc = original_body_iseq + %d;\n", next_pos);
            }
            fprintf(f, "        RB_DEBUG_COUNTER_INC(mjit_cancel_invalidate_all);\n");
            fprintf(f, "        goto cancel;\n");
            fprintf(f, "    }\n");
        }

% # compiler: Move JIT compiler's internal stack pointer
        b->stack_size += <%= insn.call_attribute('sp_inc') %>;

        fprintf(f, "}\n");
        break;
    }
}
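% #
% # For reference, the C emitted by the non-inlined path above looks roughly
% # like the following sketch. All addresses and stack offsets are hypothetical
% # (a call with argc == 1 and stack_size == 2), and the pc/sp bookkeeping
% # emitted by _mjit_compile_pc_and_sp is omitted:
% #
% #     {
% #         const struct rb_callcache *cc = (const struct rb_callcache *)0x5563a1b2c3d0;
% #         const rb_callable_method_entry_t *cc_cme = (const rb_callable_method_entry_t *)0x5563a1b2c420;
% #         if (UNLIKELY(!vm_cc_valid_p(cc, cc_cme, CLASS_OF(stack[0])))) {
% #             reg_cfp->pc = original_body_iseq + 10;
% #             reg_cfp->sp = vm_base_ptr(reg_cfp) + 2;
% #             goto send_cancel;
% #         }
% #         {
% #             VALUE val;
% #             struct rb_calling_info calling;
% #             calling.block_handler = VM_BLOCK_HANDLER_NONE;
% #             calling.argc = 1;
% #             calling.recv = stack[0];
% #             vm_call_iseq_setup_normal(ec, reg_cfp, &calling, cc_cme, 0, 1, 1);
% #             if ((val = mjit_exec(ec)) == Qundef) {
% #                 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
% #                 val = vm_exec(ec, FALSE);
% #             }
% #             stack[0] = val;
% #         }
% #         if (UNLIKELY(!mjit_call_p)) {
% #             reg_cfp->sp = vm_base_ptr(reg_cfp) + 1;
% #             reg_cfp->pc = original_body_iseq + 13;
% #             RB_DEBUG_COUNTER_INC(mjit_cancel_invalidate_all);
% #             goto cancel;
% #         }
% #     }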