author    Jean Boussier <jean.boussier@gmail.com>  2022-06-15 14:37:41 +0200
committer Jean Boussier <jean.boussier@gmail.com>  2022-06-17 09:08:26 +0200
commit    b6c1e1158d71b533b255ae7a2731598455918071 (patch)
tree      9b3fdf4eb4a54e56d77ec7732ff079bdd9f896bc /thread_pthread.c
parent    20d4168250fb1f534cf2db7c44998b25252a70f8 (diff)
GVL Instrumentation API: add STARTED and EXITED events
[Feature #18339]

After experimenting with the initial version of the API, I figured there is a need for an exit event to clean up instrumentation data. For example, if you record data in a {thread_id -> data} table, you need to free the associated data when a thread goes away.
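As a usage illustration (not part of this commit): a hypothetical extension that subscribes to the new events and keeps per-thread counters, freeing them on EXITED as the commit message describes. The callback prototype (event flag, opaque event data, user data) is assumed from the public ruby/thread.h and may differ between Ruby versions; Init_gvl_stats, gvl_event_cb and struct thread_stats are invented names, and pthread thread-local storage stands in for the {thread_id -> data} table.

/* Hypothetical extension sketch, not part of this commit. The handlers stick
 * to plain libc/pthread calls, since these hooks may fire while the thread
 * does not hold the GVL. */
#include <pthread.h>
#include <stdlib.h>
#include <ruby/ruby.h>
#include <ruby/thread.h>

static pthread_key_t stats_key;   /* one slot per native thread */

struct thread_stats {
    unsigned long ready;          /* times the thread became runnable */
    unsigned long suspended;      /* times the thread released the GVL */
};

static void
gvl_event_cb(rb_event_flag_t event, const rb_internal_thread_event_data_t *event_data, void *user_data)
{
    /* Callback arity and the event_data type are assumed from the public header. */
    struct thread_stats *stats = pthread_getspecific(stats_key);
    (void)event_data;
    (void)user_data;

    switch (event) {
      case RUBY_INTERNAL_THREAD_EVENT_STARTED:
        /* Allocate instrumentation data as soon as the native thread starts. */
        pthread_setspecific(stats_key, calloc(1, sizeof(struct thread_stats)));
        break;
      case RUBY_INTERNAL_THREAD_EVENT_READY:
        if (stats) stats->ready++;
        break;
      case RUBY_INTERNAL_THREAD_EVENT_SUSPENDED:
        if (stats) stats->suspended++;
        break;
      case RUBY_INTERNAL_THREAD_EVENT_EXITED:
        /* The new EXITED event: free the per-thread record so dead threads
         * do not leak instrumentation data. */
        free(stats);
        pthread_setspecific(stats_key, NULL);
        break;
    }
}

void
Init_gvl_stats(void)
{
    pthread_key_create(&stats_key, NULL);
    rb_internal_thread_add_event_hook(gvl_event_cb,
                                      RUBY_INTERNAL_THREAD_EVENT_STARTED |
                                      RUBY_INTERNAL_THREAD_EVENT_READY |
                                      RUBY_INTERNAL_THREAD_EVENT_SUSPENDED |
                                      RUBY_INTERNAL_THREAD_EVENT_EXITED,
                                      NULL);
}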
Diffstat (limited to 'thread_pthread.c')
-rw-r--r--  thread_pthread.c  25
1 file changed, 14 insertions, 11 deletions
diff --git a/thread_pthread.c b/thread_pthread.c
index 1f5b5b9030..da0efeb99b 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -109,6 +109,8 @@ struct rb_internal_thread_event_hook {
static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
+#define RB_INTERNAL_THREAD_HOOK(event) if (rb_internal_thread_event_hooks) { rb_thread_execute_hooks(event); }
+
rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
{
@@ -377,10 +379,7 @@ thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th)
static void
thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
- if (rb_internal_thread_event_hooks) {
- rb_thread_execute_hooks(RUBY_INTERNAL_THREAD_EVENT_READY);
- }
-
+ RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY);
if (sched->running) {
VM_ASSERT(th->unblock.func == 0 &&
"we must not be in ubf_list and GVL readyq at the same time");
@@ -412,9 +411,7 @@ thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
// ready -> running
sched->running = th;
- if (rb_internal_thread_event_hooks) {
- rb_thread_execute_hooks(RUBY_INTERNAL_THREAD_EVENT_RESUMED);
- }
+ RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED);
if (!sched->timer) {
if (!designate_timer_thread(sched) && !ubf_threads_empty()) {
@@ -434,10 +431,6 @@ thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
static rb_thread_t *
thread_sched_to_waiting_common(struct rb_thread_sched *sched)
{
- if (rb_internal_thread_event_hooks) {
- rb_thread_execute_hooks(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED);
- }
-
rb_thread_t *next;
sched->running = NULL;
next = ccan_list_top(&sched->readyq, rb_thread_t, sched.node.readyq);
@@ -449,12 +442,20 @@ thread_sched_to_waiting_common(struct rb_thread_sched *sched)
static void
thread_sched_to_waiting(struct rb_thread_sched *sched)
{
+ RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED);
rb_native_mutex_lock(&sched->lock);
thread_sched_to_waiting_common(sched);
rb_native_mutex_unlock(&sched->lock);
}
static void
+thread_sched_to_dead(struct rb_thread_sched *sched)
+{
+ thread_sched_to_waiting(sched);
+ RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED);
+}
+
+static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
rb_thread_t *next;
@@ -1173,6 +1174,8 @@ thread_start_func_1(void *th_ptr)
native_thread_init(th->nt);
+ RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED);
+
/* run */
#if defined USE_NATIVE_THREAD_INIT
thread_start_func_2(th, th->ec->machine.stack_start);
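For context on the new RB_INTERNAL_THREAD_HOOK macro: it only tests the global hook-list pointer before calling rb_thread_execute_hooks, so the common case with no instrumentation registered costs a single branch and never touches the rwlock. A rough sketch of the dispatch the macro wraps, reconstructed from the identifiers visible in the first hunk; the hook struct's field names and the callback arity are assumptions, not a copy of this file:

/* Sketch only: field names and callback arity are assumed, based on the
 * identifiers visible in the first hunk of this diff. */
struct rb_internal_thread_event_hook {
    rb_internal_thread_event_callback callback;   /* user-supplied callback */
    rb_event_flag_t event;                        /* bitmask of subscribed events */
    void *user_data;
    struct rb_internal_thread_event_hook *next;   /* singly linked list of hooks */
};

static void
rb_thread_execute_hooks(rb_event_flag_t event)
{
    /* Hook execution takes the read side of the rwlock; registering or
     * removing a hook would take the write side. */
    if (pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock) != 0) return;

    for (rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks; h; h = h->next) {
        if (h->event & event) {
            /* Second argument (per-event data) assumed unused here. */
            h->callback(event, NULL, h->user_data);
        }
    }

    pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock);
}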