aboutsummaryrefslogtreecommitdiffstats
path: root/yjit/src/asm/arm64
diff options
context:
space:
mode:
authorKevin Newton <kddnewton@gmail.com>2022-06-22 12:27:59 -0400
committerTakashi Kokubun <takashikkbn@gmail.com>2022-08-29 08:46:58 -0700
commitb272c57f27628ab114206c777d5b274713d31079 (patch)
tree4f490b317586ebbfc46d2142c4bde2fdb09f45e2 /yjit/src/asm/arm64
parentd9163280782086b57119abc9478580a6b3efd2c3 (diff)
downloadruby-b272c57f27628ab114206c777d5b274713d31079.tar.gz
LSL, LSR, B.cond (https://github.com/Shopify/ruby/pull/303)
* LSL and LSR * B.cond * Move A64 files around to make more sense * offset -> byte_offset for bcond
Diffstat (limited to 'yjit/src/asm/arm64')
-rw-r--r--yjit/src/asm/arm64/README.md6
-rw-r--r--yjit/src/asm/arm64/arg/bitmask_imm.rs (renamed from yjit/src/asm/arm64/inst/bitmask_imm.rs)0
-rw-r--r--yjit/src/asm/arm64/arg/condition.rs20
-rw-r--r--yjit/src/asm/arm64/arg/mod.rs10
-rw-r--r--yjit/src/asm/arm64/arg/sf.rs (renamed from yjit/src/asm/arm64/inst/sf.rs)0
-rw-r--r--yjit/src/asm/arm64/inst/branch_cond.rs73
-rw-r--r--yjit/src/asm/arm64/inst/data_imm.rs2
-rw-r--r--yjit/src/asm/arm64/inst/data_reg.rs2
-rw-r--r--yjit/src/asm/arm64/inst/logical_imm.rs3
-rw-r--r--yjit/src/asm/arm64/inst/logical_reg.rs2
-rw-r--r--yjit/src/asm/arm64/inst/mod.rs494
-rw-r--r--yjit/src/asm/arm64/inst/mov.rs2
-rw-r--r--yjit/src/asm/arm64/inst/shift_imm.rs147
-rw-r--r--yjit/src/asm/arm64/mod.rs527
14 files changed, 801 insertions, 487 deletions
diff --git a/yjit/src/asm/arm64/README.md b/yjit/src/asm/arm64/README.md
index 3d0ec57d34..edae5773e8 100644
--- a/yjit/src/asm/arm64/README.md
+++ b/yjit/src/asm/arm64/README.md
@@ -4,11 +4,9 @@ This module is responsible for encoding YJIT operands into an appropriate Arm64
## Architecture
-Every instruction in the Arm64 instruction set is 32 bits wide and is represented in little-endian order. Because they're all going to the same size, we represent each instruction by a struct that implements `From<T> for u32`, which contains the mechanism for encoding each instruction.
+Every instruction in the Arm64 instruction set is 32 bits wide and is represented in little-endian order. Because they're all going to the same size, we represent each instruction by a struct that implements `From<T> for u32`, which contains the mechanism for encoding each instruction. The encoding for each instruction is shown in the documentation for the struct that ends up being created.
-Generally each set of instructions falls under a certain family (like data processing -- register). These instructions are encoded similarly, so we group them into their own submodules. The encoding for each type is shown in the documentation for the struct that ends up being created.
-
-In general each set of bytes inside of the struct has either a direct value (usually a `u8`/`u16`) or some kind of `enum` that can be converted directly into a `u32`.
+In general each set of bytes inside of the struct has either a direct value (usually a `u8`/`u16`) or some kind of `enum` that can be converted directly into a `u32`. For more complicated pieces of encoding (e.g., bitmask immediates) a corresponding module under the `arg` namespace is available.
## Helpful links
diff --git a/yjit/src/asm/arm64/inst/bitmask_imm.rs b/yjit/src/asm/arm64/arg/bitmask_imm.rs
index 7e5a21c7b4..7e5a21c7b4 100644
--- a/yjit/src/asm/arm64/inst/bitmask_imm.rs
+++ b/yjit/src/asm/arm64/arg/bitmask_imm.rs
diff --git a/yjit/src/asm/arm64/arg/condition.rs b/yjit/src/asm/arm64/arg/condition.rs
new file mode 100644
index 0000000000..db269726d7
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/condition.rs
@@ -0,0 +1,20 @@
+/// Various instructions in A64 can have condition codes attached. This enum
+/// includes all of the various kinds of conditions along with their respective
+/// encodings.
+pub enum Condition {
+ EQ = 0b0000, // equal to
+ NE = 0b0001, // not equal to
+ CS = 0b0010, // carry set (alias for HS)
+ CC = 0b0011, // carry clear (alias for LO)
+ MI = 0b0100, // minus, negative
+ PL = 0b0101, // positive or zero
+ VS = 0b0110, // signed overflow
+ VC = 0b0111, // no signed overflow
+ HI = 0b1000, // greater than (unsigned)
+ LS = 0b1001, // less than or equal to (unsigned)
+ GE = 0b1010, // greater than or equal to (signed)
+ LT = 0b1011, // less than (signed)
+ GT = 0b1100, // greater than (signed)
+ LE = 0b1101, // less than or equal to (signed)
+ AL = 0b1110, // always
+}
diff --git a/yjit/src/asm/arm64/arg/mod.rs b/yjit/src/asm/arm64/arg/mod.rs
new file mode 100644
index 0000000000..0d2f1ac28a
--- /dev/null
+++ b/yjit/src/asm/arm64/arg/mod.rs
@@ -0,0 +1,10 @@
+// This module contains various A64 instruction arguments and the logic
+// necessary to encode them.
+
+mod bitmask_imm;
+mod condition;
+mod sf;
+
+pub use bitmask_imm::BitmaskImmediate;
+pub use condition::Condition;
+pub use sf::Sf;
diff --git a/yjit/src/asm/arm64/inst/sf.rs b/yjit/src/asm/arm64/arg/sf.rs
index c2fd33302c..c2fd33302c 100644
--- a/yjit/src/asm/arm64/inst/sf.rs
+++ b/yjit/src/asm/arm64/arg/sf.rs
diff --git a/yjit/src/asm/arm64/inst/branch_cond.rs b/yjit/src/asm/arm64/inst/branch_cond.rs
new file mode 100644
index 0000000000..21fdda5d3f
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/branch_cond.rs
@@ -0,0 +1,73 @@
+use super::super::arg::Condition;
+
+/// The struct that represents an A64 conditional branch instruction that can be
+/// encoded.
+///
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 0 1 0 1 0 1 0 0 0 |
+/// | imm19........................................................... cond....... |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct BranchCond {
+ /// The kind of condition to check before branching.
+ cond: Condition,
+
+ /// The instruction offset from this instruction to branch to.
+ imm19: i32
+}
+
+impl BranchCond {
+ /// B.cond
+ /// https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/B-cond--Branch-conditionally-
+ pub fn bcond(cond: Condition, byte_offset: i32) -> Self {
+ Self { cond, imm19: byte_offset >> 2 }
+ }
+}
+
+/// https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Branches--Exception-Generating-and-System-instructions?lang=en
+const FAMILY: u32 = 0b101;
+
+impl From<BranchCond> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: BranchCond) -> Self {
+ let imm19 = (inst.imm19 as u32) & ((1 << 19) - 1);
+
+ 0
+ | (1 << 30)
+ | (FAMILY << 26)
+ | (imm19 << 5)
+ | (inst.cond as u32)
+ }
+}
+
+impl From<BranchCond> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: BranchCond) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_b_eq() {
+ let result: u32 = BranchCond::bcond(Condition::EQ, 128).into();
+ assert_eq!(0x54000400, result);
+ }
+
+ #[test]
+ fn test_b_vs() {
+ let result: u32 = BranchCond::bcond(Condition::VS, 128).into();
+ assert_eq!(0x54000406, result);
+ }
+
+ #[test]
+ fn test_b_ne_neg() {
+ let result: u32 = BranchCond::bcond(Condition::NE, -128).into();
+ assert_eq!(0x54fffc01, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/inst/data_imm.rs b/yjit/src/asm/arm64/inst/data_imm.rs
index 950cf3421e..19e2bfa199 100644
--- a/yjit/src/asm/arm64/inst/data_imm.rs
+++ b/yjit/src/asm/arm64/inst/data_imm.rs
@@ -1,4 +1,4 @@
-use super::sf::Sf;
+use super::super::arg::Sf;
/// The operation being performed by this instruction.
enum Op {
diff --git a/yjit/src/asm/arm64/inst/data_reg.rs b/yjit/src/asm/arm64/inst/data_reg.rs
index 40f026d1fd..e2c2723fcf 100644
--- a/yjit/src/asm/arm64/inst/data_reg.rs
+++ b/yjit/src/asm/arm64/inst/data_reg.rs
@@ -1,4 +1,4 @@
-use super::sf::Sf;
+use super::super::arg::Sf;
/// The operation being performed by this instruction.
enum Op {
diff --git a/yjit/src/asm/arm64/inst/logical_imm.rs b/yjit/src/asm/arm64/inst/logical_imm.rs
index 88de8ba4a1..cc2a16cbdc 100644
--- a/yjit/src/asm/arm64/inst/logical_imm.rs
+++ b/yjit/src/asm/arm64/inst/logical_imm.rs
@@ -1,5 +1,4 @@
-use super::bitmask_imm::BitmaskImmediate;
-use super::sf::Sf;
+use super::super::arg::{BitmaskImmediate, Sf};
// Which operation to perform.
enum Opc {
diff --git a/yjit/src/asm/arm64/inst/logical_reg.rs b/yjit/src/asm/arm64/inst/logical_reg.rs
index 929d80b1a7..3feb3350ab 100644
--- a/yjit/src/asm/arm64/inst/logical_reg.rs
+++ b/yjit/src/asm/arm64/inst/logical_reg.rs
@@ -1,4 +1,4 @@
-use super::sf::Sf;
+use super::super::arg::Sf;
/// The type of shift to perform on the second operand register.
enum Shift {
diff --git a/yjit/src/asm/arm64/inst/mod.rs b/yjit/src/asm/arm64/inst/mod.rs
index 7d05f28604..2f0e708999 100644
--- a/yjit/src/asm/arm64/inst/mod.rs
+++ b/yjit/src/asm/arm64/inst/mod.rs
@@ -1,6 +1,9 @@
+// This module contains various A64 instructions and the logic necessary to
+// encode them into u32s.
+
mod atomic;
-mod bitmask_imm;
mod branch;
+mod branch_cond;
mod call;
mod data_imm;
mod data_reg;
@@ -8,481 +11,18 @@ mod load;
mod logical_imm;
mod logical_reg;
mod mov;
-mod sf;
+mod shift_imm;
mod store;
-use core::num;
-
-use atomic::Atomic;
-use branch::Branch;
-use call::Call;
-use data_imm::DataImm;
-use data_reg::DataReg;
-use load::Load;
-use logical_imm::LogicalImm;
-use logical_reg::LogicalReg;
-use mov::Mov;
-use store::Store;
-
-use crate::asm::CodeBlock;
-use super::opnd::*;
-
-/// Checks that a signed value fits within the specified number of bits.
-const fn imm_fits_bits(imm: i64, num_bits: u8) -> bool {
- let minimum = if num_bits == 64 { i64::MIN } else { -2_i64.pow((num_bits as u32) - 1) };
- let maximum = if num_bits == 64 { i64::MAX } else { 2_i64.pow((num_bits as u32) - 1) - 1 };
-
- imm >= minimum && imm <= maximum
-}
-
-/// Checks that an unsigned value fits within the specified number of bits.
-const fn uimm_fits_bits(uimm: u64, num_bits: u8) -> bool {
- let maximum = if num_bits == 64 { u64::MAX } else { 2_u64.pow(num_bits as u32) - 1 };
-
- uimm <= maximum
-}
-
-/// ADD - add rn and rm, put the result in rd, don't update flags
-pub fn add(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
- let bytes: [u8; 4] = match (rd, rn, rm) {
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
- assert!(
- rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
- "All operands must be of the same size."
- );
-
- DataReg::add(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
- },
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
- assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
-
- DataImm::add(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
- },
- _ => panic!("Invalid operand combination to add instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// ADDS - add rn and rm, put the result in rd, update flags
-pub fn adds(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
- let bytes: [u8; 4] = match (rd, rn, rm) {
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
- assert!(
- rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
- "All operands must be of the same size."
- );
-
- DataReg::adds(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
- },
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
- assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
-
- DataImm::adds(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
- },
- _ => panic!("Invalid operand combination to adds instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// AND - and rn and rm, put the result in rd, don't update flags
-pub fn and(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
- let bytes: [u8; 4] = match (rd, rn, rm) {
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
- assert!(
- rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
- "All operands must be of the same size."
- );
-
- LogicalReg::and(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
- },
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
- assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
-
- LogicalImm::and(rd.reg_no, rn.reg_no, imm.try_into().unwrap(), rd.num_bits).into()
- },
- _ => panic!("Invalid operand combination to and instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// ANDS - and rn and rm, put the result in rd, update flags
-pub fn ands(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
- let bytes: [u8; 4] = match (rd, rn, rm) {
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
- assert!(
- rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
- "All operands must be of the same size."
- );
-
- LogicalReg::ands(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
- },
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
- assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
-
- LogicalImm::ands(rd.reg_no, rn.reg_no, imm.try_into().unwrap(), rd.num_bits).into()
- },
- _ => panic!("Invalid operand combination to ands instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// BL - branch with link (offset is number of instructions to jump)
-pub fn bl(cb: &mut CodeBlock, imm26: A64Opnd) {
- let bytes: [u8; 4] = match imm26 {
- A64Opnd::Imm(imm26) => {
- assert!(imm_fits_bits(imm26, 26), "The immediate operand must be 26 bits or less.");
-
- Call::bl(imm26 as i32).into()
- },
- _ => panic!("Invalid operand combination to bl instruction.")
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// BR - branch to a register
-pub fn br(cb: &mut CodeBlock, rn: A64Opnd) {
- let bytes: [u8; 4] = match rn {
- A64Opnd::Reg(rn) => Branch::br(rn.reg_no).into(),
- _ => panic!("Invalid operand to br instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// CMP - compare rn and rm, update flags
-pub fn cmp(cb: &mut CodeBlock, rn: A64Opnd, rm: A64Opnd) {
- let bytes: [u8; 4] = match (rn, rm) {
- (A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
- assert!(
- rn.num_bits == rm.num_bits,
- "All operands must be of the same size."
- );
-
- DataReg::cmp(rn.reg_no, rm.reg_no, rn.num_bits).into()
- },
- (A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
- assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
-
- DataImm::cmp(rn.reg_no, imm12 as u16, rn.num_bits).into()
- },
- _ => panic!("Invalid operand combination to cmp instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// LDADDAL - atomic add with acquire and release semantics
-pub fn ldaddal(cb: &mut CodeBlock, rs: A64Opnd, rt: A64Opnd, rn: A64Opnd) {
- let bytes: [u8; 4] = match (rs, rt, rn) {
- (A64Opnd::Reg(rs), A64Opnd::Reg(rt), A64Opnd::Reg(rn)) => {
- assert!(
- rs.num_bits == rt.num_bits && rt.num_bits == rn.num_bits,
- "All operands must be of the same size."
- );
-
- Atomic::ldaddal(rs.reg_no, rt.reg_no, rn.reg_no, rs.num_bits).into()
- },
- _ => panic!("Invalid operand combination to ldaddal instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// LDUR - load a memory address into a register
-pub fn ldur(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
- let bytes: [u8; 4] = match (rt, rn) {
- (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
- assert!(rt.num_bits == rn.num_bits, "Expected registers to be the same size");
- assert!(imm_fits_bits(rn.disp.into(), 9), "Expected displacement to be 9 bits or less");
-
- Load::ldur(rt.reg_no, rn.base_reg_no, rn.disp as i16, rt.num_bits).into()
- },
- _ => panic!("Invalid operands for LDUR")
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// MOVK - move a 16 bit immediate into a register, keep the other bits in place
-pub fn movk(cb: &mut CodeBlock, rd: A64Opnd, imm16: A64Opnd, shift: u8) {
- let bytes: [u8; 4] = match (rd, imm16) {
- (A64Opnd::Reg(rd), A64Opnd::UImm(imm16)) => {
- assert!(uimm_fits_bits(imm16, 16), "The immediate operand must be 16 bits or less.");
-
- Mov::movk(rd.reg_no, imm16 as u16, shift, rd.num_bits).into()
- },
- _ => panic!("Invalid operand combination to movk instruction.")
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// MOVZ - move a 16 bit immediate into a register, zero the other bits
-pub fn movz(cb: &mut CodeBlock, rd: A64Opnd, imm16: A64Opnd, shift: u8) {
- let bytes: [u8; 4] = match (rd, imm16) {
- (A64Opnd::Reg(rd), A64Opnd::UImm(imm16)) => {
- assert!(uimm_fits_bits(imm16, 16), "The immediate operand must be 16 bits or less.");
-
- Mov::movz(rd.reg_no, imm16 as u16, shift, rd.num_bits).into()
- },
- _ => panic!("Invalid operand combination to movz instruction.")
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// STUR - store a value in a register at a memory address
-pub fn stur(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
- let bytes: [u8; 4] = match (rt, rn) {
- (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
- assert!(rt.num_bits == rn.num_bits, "Expected registers to be the same size");
- assert!(imm_fits_bits(rn.disp.into(), 9), "Expected displacement to be 9 bits or less");
-
- Store::stur(rt.reg_no, rn.base_reg_no, rn.disp as i16, rt.num_bits).into()
- },
- _ => panic!("Invalid operand combination to stur instruction.")
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// SUB - subtract rm from rn, put the result in rd, don't update flags
-pub fn sub(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
- let bytes: [u8; 4] = match (rd, rn, rm) {
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
- assert!(
- rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
- "All operands must be of the same size."
- );
-
- DataReg::sub(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
- },
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
- assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
-
- DataImm::sub(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
- },
- _ => panic!("Invalid operand combination to sub instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// SUBS - subtract rm from rn, put the result in rd, update flags
-pub fn subs(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
- let bytes: [u8; 4] = match (rd, rn, rm) {
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
- assert!(
- rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
- "All operands must be of the same size."
- );
-
- DataReg::subs(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
- },
- (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
- assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
- assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
-
- DataImm::subs(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
- },
- _ => panic!("Invalid operand combination to subs instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// RET - unconditionally return to a location in a register, defaults to X30
-pub fn ret(cb: &mut CodeBlock, rn: A64Opnd) {
- let bytes: [u8; 4] = match rn {
- A64Opnd::None => Branch::ret(30).into(),
- A64Opnd::Reg(reg) => Branch::ret(reg.reg_no).into(),
- _ => panic!("Invalid operand to ret instruction.")
- };
-
- cb.write_bytes(&bytes);
-}
-
-/// TST - test the bits of a register against a mask, then update flags
-pub fn tst(cb: &mut CodeBlock, rn: A64Opnd, rm: A64Opnd) {
- let bytes: [u8; 4] = match (rn, rm) {
- (A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
- assert!(rn.num_bits == rm.num_bits, "All operands must be of the same size.");
-
- LogicalReg::tst(rn.reg_no, rm.reg_no, rn.num_bits).into()
- },
- (A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
- LogicalImm::tst(rn.reg_no, imm.try_into().unwrap(), rn.num_bits).into()
- },
- _ => panic!("Invalid operand combination to tst instruction."),
- };
-
- cb.write_bytes(&bytes);
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- /// Check that the bytes for an instruction sequence match a hex string
- fn check_bytes<R>(bytes: &str, run: R) where R: FnOnce(&mut super::CodeBlock) {
- let mut cb = super::CodeBlock::new_dummy(128);
- run(&mut cb);
- assert_eq!(format!("{:x}", cb), bytes);
- }
-
- #[test]
- fn test_imm_fits_bits() {
- assert!(imm_fits_bits(i8::MAX.into(), 8));
- assert!(imm_fits_bits(i8::MIN.into(), 8));
-
- assert!(imm_fits_bits(i16::MAX.into(), 16));
- assert!(imm_fits_bits(i16::MIN.into(), 16));
-
- assert!(imm_fits_bits(i32::MAX.into(), 32));
- assert!(imm_fits_bits(i32::MIN.into(), 32));
-
- assert!(imm_fits_bits(i64::MAX.into(), 64));
- assert!(imm_fits_bits(i64::MIN.into(), 64));
- }
-
- #[test]
- fn test_uimm_fits_bits() {
- assert!(uimm_fits_bits(u8::MAX.into(), 8));
- assert!(uimm_fits_bits(u16::MAX.into(), 16));
- assert!(uimm_fits_bits(u32::MAX.into(), 32));
- assert!(uimm_fits_bits(u64::MAX.into(), 64));
- }
-
- #[test]
- fn test_add_register() {
- check_bytes("2000028b", |cb| add(cb, X0, X1, X2));
- }
-
- #[test]
- fn test_add_immediate() {
- check_bytes("201c0091", |cb| add(cb, X0, X1, A64Opnd::new_uimm(7)));
- }
-
- #[test]
- fn test_adds_register() {
- check_bytes("200002ab", |cb| adds(cb, X0, X1, X2));
- }
-
- #[test]
- fn test_adds_immediate() {
- check_bytes("201c00b1", |cb| adds(cb, X0, X1, A64Opnd::new_uimm(7)));
- }
-
- #[test]
- fn test_and_register() {
- check_bytes("2000028a", |cb| and(cb, X0, X1, X2));
- }
-
- #[test]
- fn test_and_immediate() {
- check_bytes("20084092", |cb| and(cb, X0, X1, A64Opnd::new_uimm(7)));
- }
-
- #[test]
- fn test_ands_register() {
- check_bytes("200002ea", |cb| ands(cb, X0, X1, X2));
- }
-
- #[test]
- fn test_ands_immediate() {
- check_bytes("200840f2", |cb| ands(cb, X0, X1, A64Opnd::new_uimm(7)));
- }
-
- #[test]
- fn test_bl() {
- check_bytes("00040094", |cb| bl(cb, A64Opnd::new_imm(1024)));
- }
-
- #[test]
- fn test_br() {
- check_bytes("80021fd6", |cb| br(cb, X20));
- }
-
- #[test]
- fn test_cmp_register() {
- check_bytes("5f010beb", |cb| cmp(cb, X10, X11));
- }
-
- #[test]
- fn test_cmp_immediate() {
- check_bytes("5f3900f1", |cb| cmp(cb, X10, A64Opnd::new_uimm(14)));
- }
-
- #[test]
- fn test_ldaddal() {
- check_bytes("8b01eaf8", |cb| ldaddal(cb, X10, X11, X12));
- }
-
- #[test]
- fn test_ldur() {
- check_bytes("20b047f8", |cb| ldur(cb, X0, A64Opnd::new_mem(X1, 123)));
- }
-
- #[test]
- fn test_movk() {
- check_bytes("600fa0f2", |cb| movk(cb, X0, A64Opnd::new_uimm(123), 16));
- }
-
- #[test]
- fn test_movz() {
- check_bytes("600fa0d2", |cb| movz(cb, X0, A64Opnd::new_uimm(123), 16));
- }
-
- #[test]
- fn test_ret_none() {
- check_bytes("c0035fd6", |cb| ret(cb, A64Opnd::None));
- }
-
- #[test]
- fn test_ret_register() {
- check_bytes("80025fd6", |cb| ret(cb, X20));
- }
-
- #[test]
- fn test_stur() {
- check_bytes("6a0108f8", |cb| stur(cb, X10, A64Opnd::new_mem(X11, 128)));
- }
-
- #[test]
- fn test_sub_register() {
- check_bytes("200002cb", |cb| sub(cb, X0, X1, X2));
- }
-
- #[test]
- fn test_sub_immediate() {
- check_bytes("201c00d1", |cb| sub(cb, X0, X1, A64Opnd::new_uimm(7)));
- }
-
- #[test]
- fn test_subs_register() {
- check_bytes("200002eb", |cb| subs(cb, X0, X1, X2));
- }
-
- #[test]
- fn test_subs_immediate() {
- check_bytes("201c00f1", |cb| subs(cb, X0, X1, A64Opnd::new_uimm(7)));
- }
-
- #[test]
- fn test_tst_register() {
- check_bytes("1f0001ea", |cb| tst(cb, X0, X1));
- }
-
- #[test]
- fn test_tst_immediate() {
- check_bytes("3f0840f2", |cb| tst(cb, X1, A64Opnd::new_uimm(7)));
- }
-}
+pub use atomic::Atomic;
+pub use branch::Branch;
+pub use branch_cond::BranchCond;
+pub use call::Call;
+pub use data_imm::DataImm;
+pub use data_reg::DataReg;
+pub use load::Load;
+pub use logical_imm::LogicalImm;
+pub use logical_reg::LogicalReg;
+pub use mov::Mov;
+pub use shift_imm::ShiftImm;
+pub use store::Store;
diff --git a/yjit/src/asm/arm64/inst/mov.rs b/yjit/src/asm/arm64/inst/mov.rs
index 0d68ffd206..e7cb9215b0 100644
--- a/yjit/src/asm/arm64/inst/mov.rs
+++ b/yjit/src/asm/arm64/inst/mov.rs
@@ -1,4 +1,4 @@
-use super::sf::Sf;
+use super::super::arg::Sf;
/// Which operation is being performed.
enum Op {
diff --git a/yjit/src/asm/arm64/inst/shift_imm.rs b/yjit/src/asm/arm64/inst/shift_imm.rs
new file mode 100644
index 0000000000..3d2685a997
--- /dev/null
+++ b/yjit/src/asm/arm64/inst/shift_imm.rs
@@ -0,0 +1,147 @@
+use super::super::arg::Sf;
+
+/// The operation to perform for this instruction.
+enum Opc {
+ /// Logical left shift
+ LSL,
+
+ /// Logical shift right
+ LSR
+}
+
+/// The struct that represents an A64 unsigned bitfield move instruction that
+/// can be encoded.
+///
+/// LSL (immediate)
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+/// | 31 30 29 28 | 27 26 25 24 | 23 22 21 20 | 19 18 17 16 | 15 14 13 12 | 11 10 09 08 | 07 06 05 04 | 03 02 01 00 |
+/// | 1 0 1 0 0 1 1 0 |
+/// | sf N immr............... imms............... rn.............. rd.............. |
+/// +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+///
+pub struct ShiftImm {
+ /// The register number of the destination register.
+ rd: u8,
+
+ /// The register number of the first operand register.
+ rn: u8,
+
+ /// The immediate value to shift by.
+ shift: u8,
+
+ /// The opcode for this instruction.
+ opc: Opc,
+
+ /// Whether or not this instruction is operating on 64-bit operands.
+ sf: Sf
+}
+
+impl ShiftImm {
+ /// LSL (immediate)
+ /// https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/LSL--immediate---Logical-Shift-Left--immediate---an-alias-of-UBFM-?lang=en
+ pub fn lsl(rd: u8, rn: u8, shift: u8, num_bits: u8) -> Self {
+ ShiftImm { rd, rn, shift, opc: Opc::LSL, sf: num_bits.into() }
+ }
+
+ /// LSR (immediate)
+ /// https://developer.arm.com/documentation/ddi0602/2021-12/Base-Instructions/LSR--immediate---Logical-Shift-Right--immediate---an-alias-of-UBFM-?lang=en
+ pub fn lsr(rd: u8, rn: u8, shift: u8, num_bits: u8) -> Self {
+ ShiftImm { rd, rn, shift, opc: Opc::LSR, sf: num_bits.into() }
+ }
+
+ /// Returns a triplet of (n, immr, imms) encoded in u32s for this
+ /// instruction. This mirrors how they will be encoded in the actual bits.
+ fn bitmask(&self) -> (u32, u32, u32) {
+ match self.opc {
+ // The key insight is a little buried in the docs, but effectively:
+ // LSL <Wd>, <Wn>, #<shift> == UBFM <Wd>, <Wn>, #(-<shift> MOD 32), #(31-<shift>)
+ // LSL <Xd>, <Xn>, #<shift> == UBFM <Xd>, <Xn>, #(-<shift> MOD 64), #(63-<shift>)
+ Opc::LSL => {
+ let shift = -(self.shift as i16);
+
+ match self.sf {
+ Sf::Sf32 => (
+ 0,
+ (shift.rem_euclid(32) & 0x3f) as u32,
+ ((31 - self.shift) & 0x3f) as u32
+ ),
+ Sf::Sf64 => (
+ 1,
+ (shift.rem_euclid(64) & 0x3f) as u32,
+ ((63 - self.shift) & 0x3f) as u32
+ )
+ }
+ },
+ // Similar to LSL:
+ // LSR <Wd>, <Wn>, #<shift> == UBFM <Wd>, <Wn>, #<shift>, #31
+ // LSR <Xd>, <Xn>, #<shift> == UBFM <Xd>, <Xn>, #<shift>, #63
+ Opc::LSR => {
+ match self.sf {
+ Sf::Sf32 => (0, (self.shift & 0x3f) as u32, 31),
+ Sf::Sf64 => (1, (self.shift & 0x3f) as u32, 63)
+ }
+ }
+ }
+ }
+}
+
+/// https://developer.arm.com/documentation/ddi0602/2022-03/Index-by-Encoding/Data-Processing----Immediate?lang=en#bitfield
+const FAMILY: u32 = 0b10011;
+
+impl From<ShiftImm> for u32 {
+ /// Convert an instruction into a 32-bit value.
+ fn from(inst: ShiftImm) -> Self {
+ let (n, immr, imms) = inst.bitmask();
+
+ 0
+ | ((inst.sf as u32) << 31)
+ | (1 << 30)
+ | (FAMILY << 24)
+ | (n << 22)
+ | (immr << 16)
+ | (imms << 10)
+ | ((inst.rn as u32) << 5)
+ | inst.rd as u32
+ }
+}
+
+impl From<ShiftImm> for [u8; 4] {
+ /// Convert an instruction into a 4 byte array.
+ fn from(inst: ShiftImm) -> [u8; 4] {
+ let result: u32 = inst.into();
+ result.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_lsl_32() {
+ let inst = ShiftImm::lsl(0, 1, 7, 32);
+ let result: u32 = inst.into();
+ assert_eq!(0x53196020, result);
+ }
+
+ #[test]
+ fn test_lsl_64() {
+ let inst = ShiftImm::lsl(0, 1, 7, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd379e020, result);
+ }
+
+ #[test]
+ fn test_lsr_32() {
+ let inst = ShiftImm::lsr(0, 1, 7, 32);
+ let result: u32 = inst.into();
+ assert_eq!(0x53077c20, result);
+ }
+
+ #[test]
+ fn test_lsr_64() {
+ let inst = ShiftImm::lsr(0, 1, 7, 64);
+ let result: u32 = inst.into();
+ assert_eq!(0xd347fc20, result);
+ }
+}
diff --git a/yjit/src/asm/arm64/mod.rs b/yjit/src/asm/arm64/mod.rs
index 85a472ddec..24f349d589 100644
--- a/yjit/src/asm/arm64/mod.rs
+++ b/yjit/src/asm/arm64/mod.rs
@@ -1,4 +1,531 @@
#![allow(dead_code)] // For instructions and operands we're not currently using.
+use crate::asm::CodeBlock;
+
+mod arg;
mod inst;
mod opnd;
+
+use arg::*;
+use inst::*;
+use opnd::*;
+
/// Checks that a signed value fits within the specified number of bits.
const fn imm_fits_bits(imm: i64, num_bits: u8) -> bool {
    // Every i64 fits in 64 bits; special-casing this also avoids overflow
    // when computing the bound below.
    if num_bits == 64 {
        return true;
    }

    // A two's-complement n-bit field holds [-2^(n-1), 2^(n-1) - 1].
    let bound = 2_i64.pow(num_bits as u32 - 1);
    -bound <= imm && imm < bound
}
+
/// Checks that an unsigned value fits within the specified number of bits.
const fn uimm_fits_bits(uimm: u64, num_bits: u8) -> bool {
    // Every u64 fits in 64 bits; the short-circuit also keeps 2^64 from
    // overflowing in the pow below. Otherwise the field holds [0, 2^n).
    num_bits == 64 || uimm < 2_u64.pow(num_bits as u32)
}
+
+/// ADD - add rn and rm, put the result in rd, don't update flags
+pub fn add(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::add(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
+
+ DataImm::add(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to add instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// ADDS - add rn and rm, put the result in rd, update flags
+pub fn adds(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::adds(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
+
+ DataImm::adds(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to adds instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// AND - and rn and rm, put the result in rd, don't update flags
+pub fn and(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ LogicalReg::and(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ LogicalImm::and(rd.reg_no, rn.reg_no, imm.try_into().unwrap(), rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to and instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// ANDS - and rn and rm, put the result in rd, update flags
+pub fn ands(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ LogicalReg::ands(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+
+ LogicalImm::ands(rd.reg_no, rn.reg_no, imm.try_into().unwrap(), rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ands instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// B.cond - branch to target if condition is true
+pub fn bcond(cb: &mut CodeBlock, cond: Condition, byte_offset: A64Opnd) {
+ let bytes: [u8; 4] = match byte_offset {
+ A64Opnd::Imm(imm) => {
+ assert!(imm_fits_bits(imm, 21), "The immediate operand must be 21 bits or less.");
+ assert!(imm & 0b11 == 0, "The immediate operand must be aligned to a 2-bit boundary.");
+
+ BranchCond::bcond(cond, imm as i32).into()
+ },
+ _ => panic!("Invalid operand combination to bcond instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// BL - branch with link (offset is number of instructions to jump)
+pub fn bl(cb: &mut CodeBlock, imm26: A64Opnd) {
+ let bytes: [u8; 4] = match imm26 {
+ A64Opnd::Imm(imm26) => {
+ assert!(imm_fits_bits(imm26, 26), "The immediate operand must be 26 bits or less.");
+
+ Call::bl(imm26 as i32).into()
+ },
+ _ => panic!("Invalid operand combination to bl instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// BR - branch to a register
+pub fn br(cb: &mut CodeBlock, rn: A64Opnd) {
+ let bytes: [u8; 4] = match rn {
+ A64Opnd::Reg(rn) => Branch::br(rn.reg_no).into(),
+ _ => panic!("Invalid operand to br instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// CMP - compare rn and rm, update flags
+pub fn cmp(cb: &mut CodeBlock, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rn, rm) {
+ (A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::cmp(rn.reg_no, rm.reg_no, rn.num_bits).into()
+ },
+ (A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
+ assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
+
+ DataImm::cmp(rn.reg_no, imm12 as u16, rn.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to cmp instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDADDAL - atomic add with acquire and release semantics
+pub fn ldaddal(cb: &mut CodeBlock, rs: A64Opnd, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rs, rt, rn) {
+ (A64Opnd::Reg(rs), A64Opnd::Reg(rt), A64Opnd::Reg(rn)) => {
+ assert!(
+ rs.num_bits == rt.num_bits && rt.num_bits == rn.num_bits,
+ "All operands must be of the same size."
+ );
+
+ Atomic::ldaddal(rs.reg_no, rt.reg_no, rn.reg_no, rs.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to ldaddal instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LDUR - load a memory address into a register
+pub fn ldur(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(imm_fits_bits(rn.disp.into(), 9), "Expected displacement to be 9 bits or less");
+
+ Load::ldur(rt.reg_no, rn.base_reg_no, rn.disp as i16, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operands for LDUR")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LSL - logical shift left a register by an immediate
+pub fn lsl(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, shift: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, shift) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm)) => {
+ assert!(rd.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(uimm_fits_bits(uimm, 6), "Expected shift to be 6 bits or less");
+
+ ShiftImm::lsl(rd.reg_no, rn.reg_no, uimm as u8, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operands combination to lsl instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// LSR - logical shift right a register by an immediate
+pub fn lsr(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, shift: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, shift) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(uimm)) => {
+ assert!(rd.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(uimm_fits_bits(uimm, 6), "Expected shift to be 6 bits or less");
+
+ ShiftImm::lsr(rd.reg_no, rn.reg_no, uimm as u8, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operands combination to lsr instruction")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// MOVK - move a 16 bit immediate into a register, keep the other bits in place
+pub fn movk(cb: &mut CodeBlock, rd: A64Opnd, imm16: A64Opnd, shift: u8) {
+ let bytes: [u8; 4] = match (rd, imm16) {
+ (A64Opnd::Reg(rd), A64Opnd::UImm(imm16)) => {
+ assert!(uimm_fits_bits(imm16, 16), "The immediate operand must be 16 bits or less.");
+
+ Mov::movk(rd.reg_no, imm16 as u16, shift, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to movk instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// MOVZ - move a 16 bit immediate into a register, zero the other bits
+pub fn movz(cb: &mut CodeBlock, rd: A64Opnd, imm16: A64Opnd, shift: u8) {
+ let bytes: [u8; 4] = match (rd, imm16) {
+ (A64Opnd::Reg(rd), A64Opnd::UImm(imm16)) => {
+ assert!(uimm_fits_bits(imm16, 16), "The immediate operand must be 16 bits or less.");
+
+ Mov::movz(rd.reg_no, imm16 as u16, shift, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to movz instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// STUR - store a value in a register at a memory address
+pub fn stur(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {
+ let bytes: [u8; 4] = match (rt, rn) {
+ (A64Opnd::Reg(rt), A64Opnd::Mem(rn)) => {
+ assert!(rt.num_bits == rn.num_bits, "Expected registers to be the same size");
+ assert!(imm_fits_bits(rn.disp.into(), 9), "Expected displacement to be 9 bits or less");
+
+ Store::stur(rt.reg_no, rn.base_reg_no, rn.disp as i16, rt.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to stur instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// SUB - subtract rm from rn, put the result in rd, don't update flags
+pub fn sub(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::sub(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
+
+ DataImm::sub(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to sub instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// SUBS - subtract rm from rn, put the result in rd, update flags
+pub fn subs(cb: &mut CodeBlock, rd: A64Opnd, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rd, rn, rm) {
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(
+ rd.num_bits == rn.num_bits && rn.num_bits == rm.num_bits,
+ "All operands must be of the same size."
+ );
+
+ DataReg::subs(rd.reg_no, rn.reg_no, rm.reg_no, rd.num_bits).into()
+ },
+ (A64Opnd::Reg(rd), A64Opnd::Reg(rn), A64Opnd::UImm(imm12)) => {
+ assert!(rd.num_bits == rn.num_bits, "rd and rn must be of the same size.");
+ assert!(uimm_fits_bits(imm12, 12), "The immediate operand must be 12 bits or less.");
+
+ DataImm::subs(rd.reg_no, rn.reg_no, imm12 as u16, rd.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to subs instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// RET - unconditionally return to a location in a register, defaults to X30
+pub fn ret(cb: &mut CodeBlock, rn: A64Opnd) {
+ let bytes: [u8; 4] = match rn {
+ A64Opnd::None => Branch::ret(30).into(),
+ A64Opnd::Reg(reg) => Branch::ret(reg.reg_no).into(),
+ _ => panic!("Invalid operand to ret instruction.")
+ };
+
+ cb.write_bytes(&bytes);
+}
+
+/// TST - test the bits of a register against a mask, then update flags
+pub fn tst(cb: &mut CodeBlock, rn: A64Opnd, rm: A64Opnd) {
+ let bytes: [u8; 4] = match (rn, rm) {
+ (A64Opnd::Reg(rn), A64Opnd::Reg(rm)) => {
+ assert!(rn.num_bits == rm.num_bits, "All operands must be of the same size.");
+
+ LogicalReg::tst(rn.reg_no, rm.reg_no, rn.num_bits).into()
+ },
+ (A64Opnd::Reg(rn), A64Opnd::UImm(imm)) => {
+ LogicalImm::tst(rn.reg_no, imm.try_into().unwrap(), rn.num_bits).into()
+ },
+ _ => panic!("Invalid operand combination to tst instruction."),
+ };
+
+ cb.write_bytes(&bytes);
+}
+
#[cfg(test)]
mod tests {
    use super::*;

    /// Check that the bytes for an instruction sequence match a hex string.
    /// Emits into a dummy CodeBlock and compares its little-endian hex dump
    /// against the expected known-good encoding.
    fn check_bytes<R>(bytes: &str, run: R) where R: FnOnce(&mut super::CodeBlock) {
        let mut cb = super::CodeBlock::new_dummy(128);
        run(&mut cb);
        assert_eq!(format!("{:x}", cb), bytes);
    }

    #[test]
    fn test_imm_fits_bits() {
        assert!(imm_fits_bits(i8::MAX.into(), 8));
        assert!(imm_fits_bits(i8::MIN.into(), 8));

        assert!(imm_fits_bits(i16::MAX.into(), 16));
        assert!(imm_fits_bits(i16::MIN.into(), 16));

        assert!(imm_fits_bits(i32::MAX.into(), 32));
        assert!(imm_fits_bits(i32::MIN.into(), 32));

        assert!(imm_fits_bits(i64::MAX.into(), 64));
        assert!(imm_fits_bits(i64::MIN.into(), 64));
    }

    #[test]
    fn test_uimm_fits_bits() {
        assert!(uimm_fits_bits(u8::MAX.into(), 8));
        assert!(uimm_fits_bits(u16::MAX.into(), 16));
        assert!(uimm_fits_bits(u32::MAX.into(), 32));
        assert!(uimm_fits_bits(u64::MAX.into(), 64));
    }

    // Each emitter test below checks one concrete operand combination
    // against its expected A64 encoding.

    #[test]
    fn test_add_register() {
        check_bytes("2000028b", |cb| add(cb, X0, X1, X2));
    }

    #[test]
    fn test_add_immediate() {
        check_bytes("201c0091", |cb| add(cb, X0, X1, A64Opnd::new_uimm(7)));
    }

    #[test]
    fn test_adds_register() {
        check_bytes("200002ab", |cb| adds(cb, X0, X1, X2));
    }

    #[test]
    fn test_adds_immediate() {
        check_bytes("201c00b1", |cb| adds(cb, X0, X1, A64Opnd::new_uimm(7)));
    }

    #[test]
    fn test_and_register() {
        check_bytes("2000028a", |cb| and(cb, X0, X1, X2));
    }

    #[test]
    fn test_and_immediate() {
        check_bytes("20084092", |cb| and(cb, X0, X1, A64Opnd::new_uimm(7)));
    }

    #[test]
    fn test_ands_register() {
        check_bytes("200002ea", |cb| ands(cb, X0, X1, X2));
    }

    #[test]
    fn test_ands_immediate() {
        check_bytes("200840f2", |cb| ands(cb, X0, X1, A64Opnd::new_uimm(7)));
    }

    #[test]
    fn test_bcond() {
        // 0x400 is a byte offset, 4-byte aligned as required.
        check_bytes("01200054", |cb| bcond(cb, Condition::NE, A64Opnd::new_imm(0x400)));
    }

    #[test]
    fn test_bl() {
        check_bytes("00040094", |cb| bl(cb, A64Opnd::new_imm(1024)));
    }

    #[test]
    fn test_br() {
        check_bytes("80021fd6", |cb| br(cb, X20));
    }

    #[test]
    fn test_cmp_register() {
        check_bytes("5f010beb", |cb| cmp(cb, X10, X11));
    }

    #[test]
    fn test_cmp_immediate() {
        check_bytes("5f3900f1", |cb| cmp(cb, X10, A64Opnd::new_uimm(14)));
    }

    #[test]
    fn test_ldaddal() {
        check_bytes("8b01eaf8", |cb| ldaddal(cb, X10, X11, X12));
    }

    #[test]
    fn test_ldur() {
        check_bytes("20b047f8", |cb| ldur(cb, X0, A64Opnd::new_mem(X1, 123)));
    }

    #[test]
    fn test_lsl() {
        check_bytes("6ac572d3", |cb| lsl(cb, X10, X11, A64Opnd::new_uimm(14)));
    }

    #[test]
    fn test_lsr() {
        check_bytes("6afd4ed3", |cb| lsr(cb, X10, X11, A64Opnd::new_uimm(14)));
    }

    #[test]
    fn test_movk() {
        check_bytes("600fa0f2", |cb| movk(cb, X0, A64Opnd::new_uimm(123), 16));
    }

    #[test]
    fn test_movz() {
        check_bytes("600fa0d2", |cb| movz(cb, X0, A64Opnd::new_uimm(123), 16));
    }

    #[test]
    fn test_ret_none() {
        // With no operand, RET defaults to X30 (the link register).
        check_bytes("c0035fd6", |cb| ret(cb, A64Opnd::None));
    }

    #[test]
    fn test_ret_register() {
        check_bytes("80025fd6", |cb| ret(cb, X20));
    }

    #[test]
    fn test_stur() {
        check_bytes("6a0108f8", |cb| stur(cb, X10, A64Opnd::new_mem(X11, 128)));
    }

    #[test]
    fn test_sub_register() {
        check_bytes("200002cb", |cb| sub(cb, X0, X1, X2));
    }

    #[test]
    fn test_sub_immediate() {
        check_bytes("201c00d1", |cb| sub(cb, X0, X1, A64Opnd::new_uimm(7)));
    }

    #[test]
    fn test_subs_register() {
        check_bytes("200002eb", |cb| subs(cb, X0, X1, X2));
    }

    #[test]
    fn test_subs_immediate() {
        check_bytes("201c00f1", |cb| subs(cb, X0, X1, A64Opnd::new_uimm(7)));
    }

    #[test]
    fn test_tst_register() {
        check_bytes("1f0001ea", |cb| tst(cb, X0, X1));
    }

    #[test]
    fn test_tst_immediate() {
        check_bytes("3f0840f2", |cb| tst(cb, X1, A64Opnd::new_uimm(7)));
    }
}