From 8cb00519cddadae8728d2b2e51a36da71d5bfe67 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sun, 7 Nov 2021 15:41:28 +0100
Subject: stage2 AArch64: implement airCmp

---
 src/arch/aarch64/CodeGen.zig | 77 +++++++++++++++++++++++++++++++++++---
 src/arch/aarch64/Emit.zig    | 25 +++++++++++++
 src/arch/aarch64/Mir.zig     | 14 +++++++
 src/arch/aarch64/bits.zig    | 88 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 199 insertions(+), 5 deletions(-)

diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index d3c72fe1b1..4d56e592ef 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1634,8 +1634,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
-    _ = op;
-
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1646,10 +1644,79 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
 
-    _ = lhs;
-    _ = rhs;
+    const result: MCValue = result: {
+        const lhs_is_register = lhs == .register;
+        const rhs_is_register = rhs == .register;
+        // lhs should always be a register
+        const rhs_should_be_register = switch (rhs) {
+            .immediate => |imm| imm < 0 or imm > std.math.maxInt(u12),
+            else => true,
+        };
+
+        var lhs_mcv = lhs;
+        var rhs_mcv = rhs;
+
+        // Allocate registers
+        if (rhs_should_be_register) {
+            if (!lhs_is_register and !rhs_is_register) {
+                const regs = try self.register_manager.allocRegs(2, .{
+                    Air.refToIndex(bin_op.rhs).?, Air.refToIndex(bin_op.lhs).?,
+                }, &.{});
+                lhs_mcv = MCValue{ .register = regs[0] };
+                rhs_mcv = MCValue{ .register = regs[1] };
+            } else if (!rhs_is_register) {
+                rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.rhs).?, &.{}) };
+            }
+        }
+        if (!lhs_is_register) {
+            lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?, &.{}) };
+        }
+
+        // Move the operands to the newly allocated registers
+        const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+        if (lhs_mcv == .register and !lhs_is_register) {
+            try self.genSetReg(ty, lhs_mcv.register, lhs);
+            branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.lhs).?, lhs);
+        }
+        if (rhs_mcv == .register and !rhs_is_register) {
+            try self.genSetReg(ty, rhs_mcv.register, rhs);
+            branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.rhs).?, rhs);
+        }
 
-    return self.fail("TODO implement cmp for {}", .{self.target.cpu.arch});
+        // The destination register is not present in the cmp instruction
+        // The signedness of the integer does not matter for the cmp instruction
+        switch (rhs_mcv) {
+            .register => |reg| {
+                _ = try self.addInst(.{
+                    .tag = .cmp_shifted_register,
+                    .data = .{ .rrr_imm6_shift = .{
+                        .rd = .xzr,
+                        .rn = lhs_mcv.register,
+                        .rm = reg,
+                        .imm6 = 0,
+                        .shift = .lsl,
+                    } },
+                });
+            },
+            .immediate => |imm| {
+                _ = try self.addInst(.{
+                    .tag = .cmp_immediate,
+                    .data = .{ .rr_imm12_sh = .{
+                        .rd = .xzr,
+                        .rn = lhs_mcv.register,
+                        .imm12 = @intCast(u12, imm),
+                    } },
+                });
+            },
+            else => unreachable,
+        }
+
+        break :result switch (ty.isSignedInt()) {
+            true => MCValue{ .compare_flags_signed = op },
+            false => MCValue{ .compare_flags_unsigned = op },
+        };
+    };
+    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
 fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 665d529245..b5ca0686a1 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -65,6 +65,7 @@ pub fn emitMir(
         const inst = @intCast(u32, index);
         switch (tag) {
             .add_immediate => try emit.mirAddSubtractImmediate(inst),
+            .cmp_immediate => try emit.mirAddSubtractImmediate(inst),
             .sub_immediate => try emit.mirAddSubtractImmediate(inst),
 
             .b => try emit.mirBranch(inst),
@@ -78,6 +79,8 @@ pub fn emitMir(
 
             .call_extern => try emit.mirCallExtern(inst),
 
+            .cmp_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
+
             .dbg_line => try emit.mirDbgLine(inst),
 
             .dbg_prologue_end => try emit.mirDebugPrologueEnd(),
@@ -347,6 +350,12 @@ fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
             rr_imm12_sh.imm12,
             rr_imm12_sh.sh == 1,
         )),
+        .cmp_immediate => try emit.writeInstruction(Instruction.subs(
+            rr_imm12_sh.rd,
+            rr_imm12_sh.rn,
+            rr_imm12_sh.imm12,
+            rr_imm12_sh.sh == 1,
+        )),
        .sub_immediate => try emit.writeInstruction(Instruction.sub(
             rr_imm12_sh.rd,
             rr_imm12_sh.rn,
@@ -453,6 +462,22 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
     }
 }
 
+fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
+    const tag = emit.mir.instructions.items(.tag)[inst];
+    const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift;
+
+    switch (tag) {
+        .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(
+            rrr_imm6_shift.rd,
+            rrr_imm6_shift.rn,
+            rrr_imm6_shift.rm,
+            rrr_imm6_shift.shift,
+            rrr_imm6_shift.imm6,
+        )),
+        else => unreachable,
+    }
+}
+
 fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
     assert(emit.mir.instructions.items(.tag)[inst] == .load_memory);
     const payload = emit.mir.instructions.items(.data)[inst].payload;
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 43e7c7f1ed..4bbce48d5b 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -36,6 +36,10 @@ pub const Inst = struct {
         brk,
         /// Pseudo-instruction: Call extern
         call_extern,
+        /// Compare (immediate)
+        cmp_immediate,
+        /// Compare (shifted register)
+        cmp_shifted_register,
         /// Pseudo-instruction: End of prologue
         dbg_prologue_end,
         /// Pseudo-instruction: Beginning of epilogue
@@ -141,6 +145,16 @@ pub const Inst = struct {
             imm12: u12,
             sh: u1 = 0,
         },
+        /// Three registers and a shift (shift type and 6-bit amount)
+        ///
+        /// Used by e.g. cmp_shifted_register
+        rrr_imm6_shift: struct {
+            rd: Register,
+            rn: Register,
+            rm: Register,
+            imm6: u6,
+            shift: bits.Instruction.AddSubtractShiftedRegisterShift,
+        },
         /// Three registers and a LoadStoreOffset
         ///
         /// Used by e.g. str_register
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
index f751170818..740cbdd3de 100644
--- a/src/arch/aarch64/bits.zig
+++ b/src/arch/aarch64/bits.zig
@@ -295,6 +295,18 @@ pub const Instruction = union(enum) {
         op: u1,
         sf: u1,
     },
+    add_subtract_shifted_register: packed struct {
+        rd: u5,
+        rn: u5,
+        imm6: u6,
+        rm: u5,
+        fixed_1: u1 = 0b0,
+        shift: u2,
+        fixed_2: u5 = 0b01011,
+        s: u1,
+        op: u1,
+        sf: u1,
+    },
     conditional_branch: struct {
         cond: u4,
         o0: u1,
@@ -391,6 +403,7 @@ pub const Instruction = union(enum) {
             .no_operation => |v| @bitCast(u32, v),
             .logical_shifted_register => |v| @bitCast(u32, v),
             .add_subtract_immediate => |v| @bitCast(u32, v),
+            .add_subtract_shifted_register => |v| @bitCast(u32, v),
             // TODO once packed structs work, this can be refactored
             .conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
             .compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
@@ -804,6 +817,35 @@ pub const Instruction = union(enum) {
         };
     }
 
+    pub const AddSubtractShiftedRegisterShift = enum(u2) { lsl, lsr, asr, _ };
+
+    fn addSubtractShiftedRegister(
+        op: u1,
+        s: u1,
+        shift: AddSubtractShiftedRegisterShift,
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        imm6: u6,
+    ) Instruction {
+        return Instruction{
+            .add_subtract_shifted_register = .{
+                .rd = rd.id(),
+                .rn = rn.id(),
+                .imm6 = imm6,
+                .rm = rm.id(),
+                .shift = @enumToInt(shift),
+                .s = s,
+                .op = op,
+                .sf = switch (rd.size()) {
+                    32 => 0b0,
+                    64 => 0b1,
+                    else => unreachable, // unexpected register size
+                },
+            },
+        };
+    }
+
     fn conditionalBranch(
         o0: u1,
         o1: u1,
@@ -1055,6 +1097,48 @@ pub const Instruction = union(enum) {
         return addSubtractImmediate(0b1, 0b1, rd, rn, imm, shift);
     }
 
+    // Add/subtract (shifted register)
+
+    pub fn addShiftedRegister(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: AddSubtractShiftedRegisterShift,
+        imm6: u6,
+    ) Instruction {
+        return addSubtractShiftedRegister(0b0, 0b0, shift, rd, rn, rm, imm6);
+    }
+
+    pub fn addsShiftedRegister(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: AddSubtractShiftedRegisterShift,
+        imm6: u6,
+    ) Instruction {
+        return addSubtractShiftedRegister(0b0, 0b1, shift, rd, rn, rm, imm6);
+    }
+
+    pub fn subShiftedRegister(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: AddSubtractShiftedRegisterShift,
+        imm6: u6,
+    ) Instruction {
+        return addSubtractShiftedRegister(0b1, 0b0, shift, rd, rn, rm, imm6);
+    }
+
+    pub fn subsShiftedRegister(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: AddSubtractShiftedRegisterShift,
+        imm6: u6,
+    ) Instruction {
+        return addSubtractShiftedRegister(0b1, 0b1, shift, rd, rn, rm, imm6);
+    }
+
     // Conditional branch
 
     pub fn bCond(cond: Condition, offset: i21) Instruction {
@@ -1231,6 +1315,10 @@ test "serialize instructions" {
             .inst = Instruction.cbz(.x10, 40),
             .expected = 0b1_011010_0_0000000000000001010_01010,
         },
+        .{ // add x0, x1, x2, lsl #5
+            .inst = Instruction.addShiftedRegister(.x0, .x1, .x2, .lsl, 5),
+            .expected = 0b1_0_0_01011_00_0_00010_000101_00001_00000,
+        },
     };
 
     for (testcases) |case| {
--
cgit v1.2.3
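
The cmp_shifted_register lowering above relies on the standard A64 alias: cmp Rn, Rm is subs xzr, Rn, Rm, lsl #0, so the difference is discarded and only NZCV is written. As a sanity check (not part of the patch; the helper name below is illustrative), this standalone Zig test recomputes the add/subtract (shifted register) layout described by the new packed struct and verifies both the patch's own test value and the cmp alias:

const std = @import("std");

// A64 "add/subtract (shifted register)" class; bit layout (LSB to MSB) matches
// the packed struct added to bits.zig: rd, rn, imm6, rm, 0, shift, 01011, S, op, sf.
fn addSubShiftedReg(sf: u1, op: u1, s: u1, shift: u2, rm: u5, imm6: u6, rn: u5, rd: u5) u32 {
    return @as(u32, rd) |
        (@as(u32, rn) << 5) |
        (@as(u32, imm6) << 10) |
        (@as(u32, rm) << 16) |
        (@as(u32, shift) << 22) |
        (@as(u32, 0b01011) << 24) |
        (@as(u32, s) << 29) |
        (@as(u32, op) << 30) |
        (@as(u32, sf) << 31);
}

test "cmp is subs with the zero register as destination" {
    // add x0, x1, x2, lsl #5 -- same value as the new test case in the patch
    try std.testing.expectEqual(@as(u32, 0x8b021420), addSubShiftedReg(1, 0, 0, 0b00, 2, 5, 1, 0));
    // cmp x1, x2 == subs xzr, x1, x2, lsl #0 (xzr is register number 31)
    try std.testing.expectEqual(@as(u32, 0xeb02003f), addSubShiftedReg(1, 1, 1, 0b00, 2, 0, 1, 31));
}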
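
Likewise, cmp_immediate is routed through mirAddSubtractImmediate and the existing Instruction.subs because cmp Rn, #imm is the alias subs xzr, Rn, #imm. A minimal standalone sketch of the add/subtract (immediate) layout (the helper name is illustrative, and the field order is assumed to mirror the existing add_subtract_immediate packed struct in bits.zig):

const std = @import("std");

// A64 "add/subtract (immediate)" class (LSB to MSB): rd, rn, imm12, sh, 100010, S, op, sf.
fn addSubImmediate(sf: u1, op: u1, s: u1, sh: u1, imm12: u12, rn: u5, rd: u5) u32 {
    return @as(u32, rd) |
        (@as(u32, rn) << 5) |
        (@as(u32, imm12) << 10) |
        (@as(u32, sh) << 22) |
        (@as(u32, 0b100010) << 23) |
        (@as(u32, s) << 29) |
        (@as(u32, op) << 30) |
        (@as(u32, sf) << 31);
}

test "cmp with an immediate is subs xzr, Rn, #imm" {
    // cmp x3, #42 == subs xzr, x3, #42
    try std.testing.expectEqual(@as(u32, 0xf100a87f), addSubImmediate(1, 1, 1, 0, 42, 3, 31));
}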
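
Finally, on why airCmp records the result as compare_flags_signed or compare_flags_unsigned even though a single cmp encoding is emitted: the flags are computed the same way regardless of signedness, but the condition a later consumer (for example a conditional branch) must test differs. An illustrative mapping, assuming nothing beyond std.math.CompareOperator (the function name is not from the compiler source):

const std = @import("std");

// Which A64 condition consumes the flags left by cmp, per operator and signedness.
fn conditionMnemonic(op: std.math.CompareOperator, signed: bool) []const u8 {
    return switch (op) {
        .eq => "eq",
        .neq => "ne",
        .lt => if (signed) "lt" else "lo",
        .lte => if (signed) "le" else "ls",
        .gt => if (signed) "gt" else "hi",
        .gte => if (signed) "ge" else "hs",
    };
}

test "signedness picks the condition, not the compare instruction" {
    try std.testing.expectEqualStrings("lt", conditionMnemonic(.lt, true));
    try std.testing.expectEqualStrings("lo", conditionMnemonic(.lt, false));
}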