diff options
author | mame <mame@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2017-12-20 04:24:14 +0000 |
---|---|---|
committer | mame <mame@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2017-12-20 04:24:14 +0000 |
commit | c08e8886badd47890a54bdc54f1c09de7ad5c8e8 (patch) | |
tree | 4e6482561ec1853289c3cdba8c77251728418d17 | |
parent | e7464561b5151501beb356fc750d5dd1a88014f7 (diff) | |
download | ruby-c08e8886badd47890a54bdc54f1c09de7ad5c8e8.tar.gz |
compile.c: add a RUBY_EVENT_COVERAGE_LINE event for line coverage
2.5's line coverage measurement was about two times slower than 2.4
for two reasons: (1) vm_trace uses rb_iseq_event_flags (which
takes O(n) currently where n is the length of iseq) to get an event
type, and (2) RUBY_EVENT_LINE uses setjmp to call an event hook.
This change adds a special event for line coverage,
RUBY_EVENT_COVERAGE_LINE, and adds `tracecoverage` instructions where
the event occurs in iseq.
`tracecoverage` instruction calls an event hook without vm_trace.
And, RUBY_EVENT_COVERAGE_LINE is an internal event which does not
use setjmp.
This change also cancels the lineno change due to the deletion of trace
instructions [Feature #14104]. So fixes [Bug #14191].
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@61350 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
-rw-r--r-- | compile.c | 13 | ||||
-rw-r--r-- | include/ruby/ruby.h | 2 | ||||
-rw-r--r-- | insns.def | 6 | ||||
-rw-r--r-- | iseq.c | 12 | ||||
-rw-r--r-- | thread.c | 4 | ||||
-rw-r--r-- | vm_core.h | 3 |
6 files changed, 20 insertions, 20 deletions
@@ -250,6 +250,16 @@ struct iseq_compile_data_ensure_node_stack { #define ADD_TRACE(seq, event) \ ADD_ELEM((seq), (LINK_ELEMENT *)new_trace_body(iseq, (event))) +#define ADD_TRACE_LINE_COVERAGE(seq, line) \ + do { \ + if (ISEQ_COVERAGE(iseq) && \ + ISEQ_LINE_COVERAGE(iseq) && \ + (line) > 0) { \ + RARRAY_ASET(ISEQ_LINE_COVERAGE(iseq), (line) - 1, INT2FIX(0)); \ + ADD_INSN2((seq), (line), tracecoverage, INT2FIX(RUBY_EVENT_COVERAGE_LINE), INT2FIX(line)); \ + } \ + } while (0) + #define DECL_BRANCH_BASE(branches, first_line, first_column, last_line, last_column, type) \ do { \ @@ -280,7 +290,7 @@ struct iseq_compile_data_ensure_node_stack { rb_ary_push(branches, INT2FIX(last_line)); \ rb_ary_push(branches, INT2FIX(last_column)); \ rb_ary_push(branches, INT2FIX(counter_idx)); \ - ADD_INSN2((seq), (first_line), tracebranch, INT2FIX(RUBY_EVENT_COVERAGE_BRANCH), INT2FIX(counter_idx)); \ + ADD_INSN2((seq), (first_line), tracecoverage, INT2FIX(RUBY_EVENT_COVERAGE_BRANCH), INT2FIX(counter_idx)); \ } \ } while (0) @@ -5421,6 +5431,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, in else { if (node->flags & NODE_FL_NEWLINE) { ISEQ_COMPILE_DATA(iseq)->last_line = line; + ADD_TRACE_LINE_COVERAGE(ret, line); ADD_TRACE(ret, RUBY_EVENT_LINE); } } diff --git a/include/ruby/ruby.h b/include/ruby/ruby.h index 3210103c64..0959413dac 100644 --- a/include/ruby/ruby.h +++ b/include/ruby/ruby.h @@ -2111,7 +2111,7 @@ int ruby_native_thread_p(void); #define RUBY_INTERNAL_EVENT_GC_ENTER 0x2000000 #define RUBY_INTERNAL_EVENT_GC_EXIT 0x4000000 #define RUBY_INTERNAL_EVENT_OBJSPACE_MASK 0x7f00000 -#define RUBY_INTERNAL_EVENT_MASK 0xfffe0000 +#define RUBY_INTERNAL_EVENT_MASK 0xffff0000 typedef uint32_t rb_event_flag_t; typedef void (*rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass); @@ -778,11 +778,11 @@ checkkeyword /** @c setting - @e trace a branch - @j 分岐を trace する + @e fire a coverage event (currently, this is used 
for line coverage and branch coverage) + @j カバレッジイベントを trace する */ DEFINE_INSN -tracebranch +tracecoverage (rb_num_t nf, VALUE data) () () @@ -350,21 +350,9 @@ finish_iseq_build(rb_iseq_t *iseq) { struct iseq_compile_data *data = ISEQ_COMPILE_DATA(iseq); VALUE err = data->err_info; - unsigned int i; ISEQ_COMPILE_DATA_CLEAR(iseq); compile_data_free(data); - if (ISEQ_COVERAGE(iseq) && ISEQ_LINE_COVERAGE(iseq)) { - for (i = 0; i < iseq->body->insns_info_size; i++) { - if (iseq->body->insns_info[i].events & RUBY_EVENT_LINE) { - int line_no = iseq->body->insns_info[i].line_no - 1; - if (0 <= line_no && line_no < RARRAY_LEN(ISEQ_LINE_COVERAGE(iseq))) { - RARRAY_ASET(ISEQ_LINE_COVERAGE(iseq), line_no, INT2FIX(0)); - } - } - } - } - if (RTEST(err)) { VALUE path = pathobj_path(iseq->body->location.pathobj); if (err == Qtrue) err = rb_exc_new_cstr(rb_eSyntaxError, "compile error"); @@ -5035,7 +5035,7 @@ update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg) if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) { VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES); if (lines) { - long line = rb_sourceline() - 1; + long line = FIX2INT(trace_arg->data) - 1; long count; VALUE num; if (line >= RARRAY_LEN(lines)) { /* no longer tracked */ @@ -5157,7 +5157,7 @@ rb_set_coverages(VALUE coverages, int mode, VALUE me2counter) { GET_VM()->coverages = coverages; GET_VM()->coverage_mode = mode; - rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG); + rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG); if (mode & COVERAGE_TARGET_BRANCHES) { rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG); } @@ -1755,7 +1755,8 @@ RUBY_SYMBOL_EXPORT_BEGIN int 
rb_thread_check_trap_pending(void); /* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */ -#define RUBY_EVENT_COVERAGE_BRANCH 0x010000 +#define RUBY_EVENT_COVERAGE_LINE 0x010000 +#define RUBY_EVENT_COVERAGE_BRANCH 0x020000 extern VALUE rb_get_coverages(void); extern void rb_set_coverages(VALUE, int, VALUE); |