From 946bb34fb57b9588b93d8435c9a581d0c7ef0019 Mon Sep 17 00:00:00 2001
From: Takashi Kokubun
Date: Wed, 2 Nov 2022 08:14:31 -0700
Subject: YJIT: Avoid accumulating freed pages in the payload (#6657)

Co-Authored-By: Alan Wu
Co-Authored-By: Maxime Chevalier-Boisvert
Co-authored-by: Alan Wu
Co-authored-by: Maxime Chevalier-Boisvert
---
 yjit/src/asm/mod.rs |  6 ++++++
 yjit/src/core.rs    | 35 +++++++++++++++++++++++++++--------
 2 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
index 7ac3625fbd..6b2dd7da9a 100644
--- a/yjit/src/asm/mod.rs
+++ b/yjit/src/asm/mod.rs
@@ -6,6 +6,8 @@ use std::rc::Rc;
 use crate::backend::x86_64::JMP_PTR_BYTES;
 #[cfg(target_arch = "aarch64")]
 use crate::backend::arm64::JMP_PTR_BYTES;
+use crate::core::IseqPayload;
+use crate::core::for_each_off_stack_iseq_payload;
 use crate::core::for_each_on_stack_iseq_payload;
 use crate::invariants::rb_yjit_tracing_invalidate_all;
 use crate::stats::incr_counter;
@@ -571,6 +573,10 @@ impl CodeBlock {
         let mut freed_pages: Vec<usize> = pages_in_use.iter().enumerate()
             .filter(|&(_, &in_use)| !in_use).map(|(page, _)| page).collect();
         self.free_pages(&freed_pages);
+        // Avoid accumulating freed pages for future code GC
+        for_each_off_stack_iseq_payload(|iseq_payload: &mut IseqPayload| {
+            iseq_payload.pages.clear();
+        });
 
         // Append virtual pages in case RubyVM::YJIT.code_gc is manually triggered.
         let mut virtual_pages: Vec<usize> = (self.num_mapped_pages()..self.num_virtual_pages()).collect();

diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index dd28c2361e..c0e48e87b2 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -552,20 +552,39 @@ pub fn for_each_iseq<F: FnMut(IseqPtr)>(mut callback: F) {
     unsafe { rb_yjit_for_each_iseq(Some(callback_wrapper), (&mut data) as *mut _ as *mut c_void) };
 }
 
+/// Iterate over all on-stack ISEQs
+pub fn for_each_on_stack_iseq<F: FnMut(IseqPtr)>(mut callback: F) {
+    unsafe extern "C" fn callback_wrapper(iseq: IseqPtr, data: *mut c_void) {
+        let callback: &mut &mut dyn FnMut(IseqPtr) -> bool = std::mem::transmute(&mut *data);
+        callback(iseq);
+    }
+    let mut data: &mut dyn FnMut(IseqPtr) = &mut callback;
+    unsafe { rb_jit_cont_each_iseq(Some(callback_wrapper), (&mut data) as *mut _ as *mut c_void) };
+}
+
 /// Iterate over all on-stack ISEQ payloads
-#[cfg(not(test))]
 pub fn for_each_on_stack_iseq_payload<F: FnMut(&IseqPayload)>(mut callback: F) {
-    unsafe extern "C" fn callback_wrapper(iseq: IseqPtr, data: *mut c_void) {
-        let callback: &mut &mut dyn FnMut(&IseqPayload) -> bool = std::mem::transmute(&mut *data);
+    for_each_on_stack_iseq(|iseq| {
         if let Some(iseq_payload) = get_iseq_payload(iseq) {
             callback(iseq_payload);
         }
-    }
-    let mut data: &mut dyn FnMut(&IseqPayload) = &mut callback;
-    unsafe { rb_jit_cont_each_iseq(Some(callback_wrapper), (&mut data) as *mut _ as *mut c_void) };
+    });
+}
+
+/// Iterate over all NOT on-stack ISEQ payloads
+pub fn for_each_off_stack_iseq_payload<F: FnMut(&mut IseqPayload)>(mut callback: F) {
+    let mut on_stack_iseqs: Vec<IseqPtr> = vec![];
+    for_each_on_stack_iseq(|iseq| {
+        on_stack_iseqs.push(iseq);
+    });
+    for_each_iseq(|iseq| {
+        if !on_stack_iseqs.contains(&iseq) {
+            if let Some(iseq_payload) = get_iseq_payload(iseq) {
+                callback(iseq_payload);
+            }
+        }
+    })
 }
-#[cfg(test)]
-pub fn for_each_on_stack_iseq_payload<F: FnMut(&IseqPayload)>(mut _callback: F) {}
 
 /// Free the per-iseq payload
 #[no_mangle]
-- 
cgit v1.2.3