author    Alan Wu <XrXr@users.noreply.github.com>    2021-08-26 14:58:13 -0400
committer Alan Wu <XrXr@users.noreply.github.com>    2021-10-20 18:19:39 -0400
commit    54db64f7a59f1f14b280a80491336a128f792b42 (patch)
tree      10c897bde6a2d600a869e3982f2716f6062f1d21
parent    4b815abb373801b94d2471997f1467421620c1e5 (diff)
download  ruby-54db64f7a59f1f14b280a80491336a128f792b42.tar.gz
filter out internal events. add comments. reorder
-rw-r--r--  vm_trace.c       9
-rw-r--r--  yjit_codegen.c  11
2 files changed, 15 insertions, 5 deletions
diff --git a/vm_trace.c b/vm_trace.c
index d9f0cce563..c410b709b5 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -98,11 +98,16 @@ update_global_event_hook(rb_event_flag_t vm_events)
rb_clear_attr_ccs();
}
- yjit_tracing_invalidate_all();
-
ruby_vm_event_flags = vm_events;
ruby_vm_event_enabled_global_flags |= vm_events;
rb_objspace_set_event_hook(vm_events);
+
+ if (vm_events & RUBY_EVENT_TRACEPOINT_ALL) {
+ // Invalidate all code if listening for any TracePoint event.
+ // Internal events fire inside C routines so don't need special handling.
+ // Do this last so other ractors see updated vm events when they wake up.
+ yjit_tracing_invalidate_all();
+ }
}
/* add/remove hooks */
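
For readers skimming the hunk above, here is a minimal standalone sketch of the bit test the new branch performs. The constant values are reproduced here purely for illustration; the real code uses rb_event_flag_t and RUBY_EVENT_TRACEPOINT_ALL from the Ruby headers, and the internal-event value shown mirrors RUBY_INTERNAL_EVENT_NEWOBJ as I understand it.

#include <stdint.h>
#include <stdbool.h>

typedef uint32_t event_flag_t;               /* stand-in for rb_event_flag_t */
#define EVENT_TRACEPOINT_ALL   0x0000ffffu   /* mirrors RUBY_EVENT_TRACEPOINT_ALL */
#define INTERNAL_EVENT_NEWOBJ  0x00100000u   /* mirrors RUBY_INTERNAL_EVENT_NEWOBJ */

static bool
listening_for_tracepoint(event_flag_t vm_events)
{
    /* true only when some user-visible TracePoint event bit is set */
    return (vm_events & EVENT_TRACEPOINT_ALL) != 0;
}

Because internal events sit above the low 16 bits covered by the TracePoint mask, listening_for_tracepoint(INTERNAL_EVENT_NEWOBJ) is false: enabling only internal hooks no longer invalidates jitted code, which is the point of the commit.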
diff --git a/yjit_codegen.c b/yjit_codegen.c
index 41670189b7..db0694523e 100644
--- a/yjit_codegen.c
+++ b/yjit_codegen.c
@@ -3842,12 +3842,17 @@ static void invalidate_all_blocks_for_tracing(const rb_iseq_t *iseq);
// they are waiting for a return from a C routine. For every routine call, we
// patch in an exit after the body of the containing VM instruction. This makes
// it so all the invalidated code exits as soon as execution logically reaches
-// the next VM instruction.
+// the next VM instruction. The interpreter takes care of firing the tracing
+// event if it so happens that the next VM instruction has one attached.
+//
// The c_return event needs special handling as our codegen never outputs code
// that contains tracing logic. If we let the normal output code run until the
// start of the next VM instruction by relying on the patching scheme above, we
-// would fail to fire the c_return event. To handle it, we patch in the full
-// logic at the return address. See full_cfunc_return().
+// would fail to fire the c_return event. The interpreter doesn't fire the
+// event at an instruction boundary, so simply exiting to the interpreter isn't
+// enough. To handle it, we patch in the full logic at the return address. See
+// full_cfunc_return().
+//
// In addition to patching, we prevent future entries into invalidated code by
// removing all live blocks from their iseq.
void
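
The comment block above describes two patching paths: a plain exit at the next VM instruction boundary, and a special stub for frames waiting on a cfunc so the c_return event still fires. A hedged sketch of that shape follows, using hypothetical names (tracing_block, exit_stub, cfunc_return_stub, detach_from_iseq); YJIT's actual block bookkeeping and full_cfunc_return() differ.

#include <stdbool.h>

/* hypothetical stand-ins; not YJIT's real data structures */
typedef struct tracing_block {
    void (*jit_return)(void);  /* where jitted code resumes after the C call */
    bool in_cfunc_call;        /* is this frame waiting on a cfunc to return? */
} tracing_block;

static void exit_stub(void) {}          /* exits to the interpreter at the next insn */
static void cfunc_return_stub(void) {}  /* fires c_return, then exits */
static void detach_from_iseq(tracing_block *b) { (void)b; }

static void
invalidate_for_tracing(tracing_block *b)
{
    if (b->in_cfunc_call) {
        /* the interpreter won't fire c_return at an instruction boundary,
         * so route the return through a stub that fires it first */
        b->jit_return = cfunc_return_stub;
    }
    else {
        /* plain case: exit as soon as the next VM instruction is reached */
        b->jit_return = exit_stub;
    }
    detach_from_iseq(b);  /* prevent future entries into this block */
}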