about summary refs log tree commit diff stats
path: root/yjit/src/backend/ir.rs
diff options
context:
space:
mode:
author    Takashi Kokubun <takashikkbn@gmail.com>  2023-06-12 11:30:25 -0700
committer Takashi Kokubun <takashikkbn@gmail.com>  2023-06-12 11:30:25 -0700
commit78ca085785460de46bfc4851a898d525c1698ef8 (patch)
tree9c0c73d916c070a2e6b89265736aa0620ee283ff /yjit/src/backend/ir.rs
parent888ba29e462075472776098f4f95eb6d3df8e730 (diff)
downloadruby-78ca085785460de46bfc4851a898d525c1698ef8.tar.gz
Revert "YJIT: Break register cycles for C arguments (#7918)"
This reverts commit 888ba29e462075472776098f4f95eb6d3df8e730. It caused a CI failure http://ci.rvm.jp/results/trunk-yjit@ruby-sp2-docker/4598881 and I'm investigating it.
Diffstat (limited to 'yjit/src/backend/ir.rs')
-rw-r--r--  yjit/src/backend/ir.rs  97
1 file changed, 5 insertions, 92 deletions
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index d09bf892ab..8c4a6c1b6d 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -72,9 +72,6 @@ pub enum Opnd
// Immediate Ruby value, may be GC'd, movable
Value(VALUE),
- /// C argument register. The alloc_regs resolves its register dependencies.
- CArg(Reg),
-
// Output of a preceding instruction in this block
InsnOut{ idx: usize, num_bits: u8 },
@@ -105,7 +102,6 @@ impl fmt::Debug for Opnd {
match self {
Self::None => write!(fmt, "None"),
Value(val) => write!(fmt, "Value({val:?})"),
- CArg(reg) => write!(fmt, "CArg({reg:?})"),
Stack { idx, sp_offset, .. } => write!(fmt, "SP[{}]", *sp_offset as i32 - idx - 1),
InsnOut { idx, num_bits } => write!(fmt, "Out{num_bits}({idx})"),
Imm(signed) => write!(fmt, "{signed:x}_i64"),
@@ -149,14 +145,6 @@ impl Opnd
Opnd::UImm(ptr as u64)
}
- /// Constructor for a C argument operand
- pub fn c_arg(reg_opnd: Opnd) -> Self {
- match reg_opnd {
- Opnd::Reg(reg) => Opnd::CArg(reg),
- _ => unreachable!(),
- }
- }
-
pub fn is_some(&self) -> bool {
match *self {
Opnd::None => false,
@@ -1236,55 +1224,6 @@ impl Assembler
}
}
- // Reorder C argument moves, sometimes adding extra moves using SCRATCH_REG,
- // so that they will not rewrite each other before they are used.
- fn reorder_c_args(c_args: &Vec<(Reg, Opnd)>) -> Vec<(Reg, Opnd)> {
- // Return the index of a move whose destination is not used as a source if any.
- fn find_safe_arg(c_args: &Vec<(Reg, Opnd)>) -> Option<usize> {
- c_args.iter().enumerate().find(|(_, &(dest_reg, _))| {
- c_args.iter().all(|&(_, src_opnd)| src_opnd != Opnd::Reg(dest_reg))
- }).map(|(index, _)| index)
- }
-
- // Remove moves whose source and destination are the same
- let mut c_args: Vec<(Reg, Opnd)> = c_args.clone().into_iter()
- .filter(|&(reg, opnd)| Opnd::Reg(reg) != opnd).collect();
-
- let mut moves = vec![];
- while c_args.len() > 0 {
- // Keep taking safe moves
- while let Some(index) = find_safe_arg(&c_args) {
- moves.push(c_args.remove(index));
- }
-
- // No safe move. Load the source of one move into SCRATCH_REG, and
- // then load SCRATCH_REG into the destination when it's safe.
- if c_args.len() > 0 {
- // Make sure it's safe to use SCRATCH_REG
- assert!(c_args.iter().all(|&(_, opnd)| opnd != Opnd::Reg(Assembler::SCRATCH_REG)));
-
- // Move SCRATCH <- opnd, and delay reg <- SCRATCH
- let (reg, opnd) = c_args.remove(0);
- moves.push((Assembler::SCRATCH_REG, opnd));
- c_args.push((reg, Opnd::Reg(Assembler::SCRATCH_REG)));
- }
- }
- moves
- }
-
- // Adjust the number of entries in live_ranges so that it can be indexed by mapped indexes.
- fn shift_live_ranges(live_ranges: &mut Vec<usize>, start_index: usize, shift_offset: isize) {
- if shift_offset >= 0 {
- for index in 0..(shift_offset as usize) {
- live_ranges.insert(start_index + index, start_index + index);
- }
- } else {
- for _ in 0..-shift_offset {
- live_ranges.remove(start_index);
- }
- }
- }
-
// Dump live registers for register spill debugging.
fn dump_live_regs(insns: Vec<Insn>, live_ranges: Vec<usize>, num_regs: usize, spill_index: usize) {
// Convert live_ranges to live_regs: the number of live registers at each index
@@ -1308,18 +1247,11 @@ impl Assembler
}
}
- // We may need to reorder LoadInto instructions with a C argument operand.
- // This buffers the operands of such instructions to process them in batches.
- let mut c_args: Vec<(Reg, Opnd)> = vec![];
-
- // live_ranges is indexed by original `index` given by the iterator.
let live_ranges: Vec<usize> = take(&mut self.live_ranges);
- // shifted_live_ranges is indexed by mapped indexes in insn operands.
- let mut shifted_live_ranges: Vec<usize> = live_ranges.clone();
let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), take(&mut self.side_exits));
let mut iterator = self.into_draining_iter();
- while let Some((index, mut insn)) = iterator.next_mapped() {
+ while let Some((index, mut insn)) = iterator.next_unmapped() {
// Check if this is the last instruction that uses an operand that
// spans more than one instruction. In that case, return the
// allocated register to the pool.
@@ -1330,11 +1262,12 @@ impl Assembler
// Since we have an InsnOut, we know it spans more that one
// instruction.
let start_index = *idx;
+ assert!(start_index < index);
// We're going to check if this is the last instruction that
// uses this operand. If it is, we can return the allocated
// register to the pool.
- if shifted_live_ranges[start_index] == index {
+ if live_ranges[start_index] == index {
if let Some(Opnd::Reg(reg)) = asm.insns[start_index].out_opnd() {
dealloc_reg(&mut pool, &regs, reg);
} else {
@@ -1438,27 +1371,7 @@ impl Assembler
}
}
- // Push instruction(s). Batch and reorder C argument operations if needed.
- if let Insn::LoadInto { dest: Opnd::CArg(reg), opnd } = insn {
- // Buffer C arguments
- c_args.push((reg, opnd));
- } else {
- // C arguments are buffered until CCall
- if c_args.len() > 0 {
- // Resolve C argument dependencies
- let c_args_len = c_args.len() as isize;
- let moves = reorder_c_args(&c_args.drain(..).into_iter().collect());
- shift_live_ranges(&mut shifted_live_ranges, asm.insns.len(), moves.len() as isize - c_args_len);
-
- // Push batched C arguments
- for (reg, opnd) in moves {
- asm.load_into(Opnd::Reg(reg), opnd);
- }
- }
- // Other instructions are pushed as is
- asm.push_insn(insn);
- }
- iterator.map_insn_index(&mut asm);
+ asm.push_insn(insn);
}
assert_eq!(pool, 0, "Expected all registers to be returned to the pool");
@@ -1529,7 +1442,7 @@ impl AssemblerDrainingIterator {
/// end of the current list of instructions in order to maintain that
/// alignment.
pub fn map_insn_index(&mut self, asm: &mut Assembler) {
- self.indices.push(asm.insns.len().saturating_sub(1));
+ self.indices.push(asm.insns.len() - 1);
}
/// Map an operand by using this iterator's list of mapped indices.