Diffstat (limited to 'src')

 src/arch/aarch64/CodeGen.zig | 294
 src/arch/aarch64/Emit.zig    |  27
 src/arch/aarch64/Mir.zig     |  21
 src/arch/aarch64/bits.zig    |  82
 4 files changed, 330 insertions(+), 94 deletions(-)
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 19b444cb15..b3447f43e7 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -443,14 +443,17 @@ fn gen(self: *Self) !void {
});
// exitlude jumps
- if (self.exitlude_jump_relocs.items.len == 1) {
- // There is only one relocation. Hence,
- // this relocation must be at the end of
- // the code. Therefore, we can just delete
- // the space initially reserved for the
- // jump
- self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.items[0]);
- } else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
+ if (self.exitlude_jump_relocs.items.len > 0 and
+ self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2)
+ {
+ // If the last Mir instruction (apart from the
+ // dbg_epilogue_begin) is the last exitlude jump
+ // relocation (which would just jump one instruction
+ // further), it can be safely removed
+ self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop());
+ }
+
+ for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.set(jmp_reloc, .{
.tag = .b,
.data = .{ .inst = @intCast(u32, self.mir_instructions.len) },
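
Both this hunk and the airBlock change further down apply the same peephole: a branch whose target is the very next instruction is a no-op, so when the last recorded relocation sits at the tail of the instruction list it can be deleted outright instead of relocated. (Here the comparison is against len - 2 because a trailing dbg_epilogue_begin pseudo-instruction still follows the jump.) A standalone sketch of the idea, with the Mir containers simplified to plain ArrayLists:

    const std = @import("std");

    /// Drop a trailing jump that would only skip to the next instruction.
    fn elideTrailingJump(insts: *std.ArrayList(u32), relocs: *std.ArrayList(usize)) void {
        if (relocs.items.len > 0 and
            relocs.items[relocs.items.len - 1] == insts.items.len - 1)
        {
            _ = insts.orderedRemove(relocs.pop());
        }
    }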
@@ -564,11 +567,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_gt => try self.airCmp(inst, .gt),
.cmp_neq => try self.airCmp(inst, .neq),
- .bool_and => try self.airBoolOp(inst),
- .bool_or => try self.airBoolOp(inst),
- .bit_and => try self.airBitAnd(inst),
- .bit_or => try self.airBitOr(inst),
- .xor => try self.airXor(inst),
+ .bool_and => try self.airBinOp(inst),
+ .bool_or => try self.airBinOp(inst),
+ .bit_and => try self.airBinOp(inst),
+ .bit_or => try self.airBinOp(inst),
+ .xor => try self.airBinOp(inst),
.shr, .shr_exact => try self.airShr(inst),
.alloc => try self.airAlloc(inst),
@@ -815,9 +818,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
if (reg_ok) {
// Make sure the type can fit in a register before we try to allocate one.
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- if (abi_size <= ptr_bytes) {
+ if (abi_size <= 8) {
if (self.register_manager.tryAllocReg(inst)) |reg| {
return MCValue{ .register = registerAlias(reg, abi_size) };
}
@@ -950,10 +951,69 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
switch (operand_ty.zigTypeTag()) {
.Bool => {
// TODO convert this to mvn + and
- const dest = try self.binOp(.xor, null, operand, .{ .immediate = 1 }, operand_ty, Type.bool);
- break :result dest;
+ const op_reg = switch (operand) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(operand_ty, operand),
+ };
+ self.register_manager.freezeRegs(&.{op_reg});
+ defer self.register_manager.unfreezeRegs(&.{op_reg});
+
+ const dest_reg = blk: {
+ if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
+ break :blk op_reg;
+ }
+
+ break :blk try self.register_manager.allocReg(null);
+ };
+
+ _ = try self.addInst(.{
+ .tag = .eor_immediate,
+ .data = .{ .rr_bitmask = .{
+ .rd = dest_reg,
+ .rn = op_reg,
+ .imms = 0b000000,
+ .immr = 0b000000,
+ .n = 0b1,
+ } },
+ });
+
+ break :result MCValue{ .register = dest_reg };
+ },
+ .Vector => return self.fail("TODO bitwise not for vectors", .{}),
+ .Int => {
+ const int_info = operand_ty.intInfo(self.target.*);
+ if (int_info.bits <= 64) {
+ const op_reg = switch (operand) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(operand_ty, operand),
+ };
+ self.register_manager.freezeRegs(&.{op_reg});
+ defer self.register_manager.unfreezeRegs(&.{op_reg});
+
+ const dest_reg = blk: {
+ if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
+ break :blk op_reg;
+ }
+
+ break :blk try self.register_manager.allocReg(null);
+ };
+
+ _ = try self.addInst(.{
+ .tag = .mvn,
+ .data = .{ .rr_imm6_shift = .{
+ .rd = dest_reg,
+ .rm = op_reg,
+ .imm6 = 0,
+ .shift = .lsl,
+ } },
+ });
+
+ break :result MCValue{ .register = dest_reg };
+ } else {
+ return self.fail("TODO AArch64 not on integers > u64/i64", .{});
+ }
},
- else => return self.fail("TODO bitwise not", .{}),
+ else => unreachable,
}
},
}
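
The eor_immediate above leans on AArch64's bitmask-immediate scheme: n, immr and imms do not hold a literal constant but describe a rotated run of set bits. With n = 1, immr = 0 and imms = 0 that run is a single bit with no rotation, i.e. the immediate 1, so the emitted instruction is eor xd, xn, #1 — a boolean NOT for 0/1 operands. A minimal decoder sketch for the 64-bit (n = 1) case, in the same stage1 dialect as the surrounding code (decodeBitmask64 is an illustrative name, not a compiler function):

    const std = @import("std");

    fn decodeBitmask64(immr: u6, imms: u6) u64 {
        // a run of (imms + 1) consecutive ones...
        const ones = @as(u7, imms) + 1;
        const run = if (ones == 64)
            ~@as(u64, 0)
        else
            (@as(u64, 1) << @intCast(u6, ones)) - 1;
        // ...rotated right by immr
        return std.math.rotr(u64, run, immr);
    }

    // decodeBitmask64(0, 0) == 1, the immediate used for boolean not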
@@ -976,7 +1036,20 @@ fn airMax(self: *Self, inst: Air.Inst.Index) !void {
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const ptr = try self.resolveInst(bin_op.lhs);
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const len = try self.resolveInst(bin_op.rhs);
+ const len_ty = self.air.typeOf(bin_op.rhs);
+
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes = @divExact(ptr_bits, 8);
+
+ const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
+ try self.genSetStack(ptr_ty, stack_offset + ptr_bytes, ptr);
+ try self.genSetStack(len_ty, stack_offset, len);
+ break :result MCValue{ .stack_offset = stack_offset };
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
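
airSlice (like airArrayToSlice later in this file) materializes the slice as two pointer-sized words on the stack: the pointer at stack_offset + ptr_bytes and the length at stack_offset. Assuming this backend's stack offsets are subtracted from the frame base (so a larger offset means a lower address), that matches Zig's in-memory slice layout with the pointer word first:

    // assumed frame picture (ptr_bytes == 8 on aarch64):
    //   lower address   ptr  at stack_offset + 8
    //   higher address  len  at stack_offset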
@@ -1051,9 +1124,19 @@ fn binOpRegister(
if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
const mir_tag: Mir.Inst.Tag = switch (tag) {
- .add, .ptr_add => .add_shifted_register,
- .sub, .ptr_sub => .sub_shifted_register,
+ .add,
+ .ptr_add,
+ => .add_shifted_register,
+ .sub,
+ .ptr_sub,
+ => .sub_shifted_register,
.mul => .mul,
+ .bit_and,
+ .bool_and,
+ => .and_shifted_register,
+ .bit_or,
+ .bool_or,
+ => .orr_shifted_register,
.xor => .eor_shifted_register,
else => unreachable,
};
@@ -1074,7 +1157,12 @@ fn binOpRegister(
.rn = lhs_reg,
.rm = rhs_reg,
} },
- .xor => .{ .rrr_imm6_logical_shift = .{
+ .bit_and,
+ .bool_and,
+ .bit_or,
+ .bool_or,
+ .xor,
+ => .{ .rrr_imm6_logical_shift = .{
.rd = dest_reg,
.rn = lhs_reg,
.rm = rhs_reg,
@@ -1252,20 +1340,40 @@ fn binOp(
// lowered to a << 1
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
} else {
- return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+ return self.fail("TODO binary operations on int with bits > 64", .{});
}
},
else => unreachable,
}
},
// Bitwise operations on integers
- .xor => {
+ .bit_and,
+ .bit_or,
+ .xor,
+ => {
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => return self.fail("TODO binary operations on vectors", .{}),
- .Bool => {
+ .Int => {
assert(lhs_ty.eql(rhs_ty));
- // TODO boolean operations with immediates
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 64) {
+ // TODO implement bitwise operations with immediates
+ return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+ } else {
+ return self.fail("TODO binary operations on int with bits > 64", .{});
+ }
+ },
+ else => unreachable,
+ }
+ },
+ .bool_and,
+ .bool_or,
+ => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Bool => {
+ assert(lhs != .immediate); // should have been handled by Sema
+ assert(rhs != .immediate); // should have been handled by Sema
+
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
},
else => unreachable,
@@ -1387,24 +1495,6 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-fn airBitOr(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-fn airXor(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement xor for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airShl(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl for {}", .{self.target.cpu.arch});
@@ -1523,22 +1613,39 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_ptr for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes = @divExact(ptr_bits, 8);
+ const mcv = try self.resolveInst(ty_op.operand);
+ switch (mcv) {
+ .dead, .unreach, .none => unreachable,
+ .register => unreachable, // a slice doesn't fit in one register
+ .stack_offset => |off| {
+ break :result MCValue{ .stack_offset = off + ptr_bytes };
+ },
+ .memory => |addr| {
+ break :result MCValue{ .memory = addr };
+ },
+ else => return self.fail("TODO implement slice_len for {}", .{mcv}),
+ }
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes = @divExact(ptr_bits, 8);
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
- .dead, .unreach => unreachable,
+ .dead, .unreach, .none => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off };
},
.memory => |addr| {
- break :result MCValue{ .memory = addr + 8 };
+ break :result MCValue{ .memory = addr + ptr_bytes };
},
else => return self.fail("TODO implement slice_len for {}", .{mcv}),
}
@@ -1548,13 +1655,33 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes = @divExact(ptr_bits, 8);
+ const mcv = try self.resolveInst(ty_op.operand);
+ switch (mcv) {
+ .dead, .unreach, .none => unreachable,
+ .ptr_stack_offset => |off| {
+ break :result MCValue{ .ptr_stack_offset = off + ptr_bytes };
+ },
+ else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}),
+ }
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const mcv = try self.resolveInst(ty_op.operand);
+ switch (mcv) {
+ .dead, .unreach, .none => unreachable,
+ .ptr_stack_offset => |off| {
+ break :result MCValue{ .ptr_stack_offset = off };
+ },
+ else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}),
+ }
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -2882,7 +3009,17 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.genBody(body);
- for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc);
+ // relocations for `br` instructions
+ const relocs = &self.blocks.getPtr(inst).?.relocs;
+ if (relocs.items.len > 0 and relocs.items[relocs.items.len - 1] == self.mir_instructions.len - 1) {
+ // If the last Mir instruction is the last relocation (which
+ // would just jump one instruction further), it can be safely
+ // removed
+ self.mir_instructions.orderedRemove(relocs.pop());
+ }
+ for (relocs.items) |reloc| {
+ try self.performReloc(reloc);
+ }
const result = self.blocks.getPtr(inst).?.mcv;
return self.finishAir(inst, result, .{ .none, .none, .none });
@@ -2912,15 +3049,6 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ branch.operand, .none, .none });
}
-fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const air_tags = self.air.instructions.items(.tag);
- _ = air_tags;
-
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
@@ -3136,11 +3264,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
4, 8 => .str_stack,
else => unreachable, // unexpected abi size
};
- const rt: Register = switch (abi_size) {
- 1, 2, 4 => reg.to32(),
- 8 => reg.to64(),
- else => unreachable, // unexpected abi size
- };
+ const rt = registerAlias(reg, abi_size);
_ = try self.addInst(.{
.tag = tag,
@@ -3399,9 +3523,20 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airArrayToSlice for {}", .{
- self.target.cpu.arch,
- });
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const ptr_ty = self.air.typeOf(ty_op.operand);
+ const ptr = try self.resolveInst(ty_op.operand);
+ const array_ty = ptr_ty.childType();
+ const array_len = @intCast(u32, array_ty.arrayLen());
+
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes = @divExact(ptr_bits, 8);
+
+ const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
+ try self.genSetStack(ptr_ty, stack_offset + ptr_bytes, ptr);
+ try self.genSetStack(Type.initTag(.usize), stack_offset, .{ .immediate = array_len });
+ break :result MCValue{ .stack_offset = stack_offset };
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -3622,7 +3757,6 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
@@ -3652,13 +3786,19 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
},
.Int => {
const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= ptr_bits and info.signedness == .signed) {
- return MCValue{ .immediate = @bitCast(u64, typed_value.val.toSignedInt()) };
- }
- if (info.bits > ptr_bits or info.signedness == .signed) {
- return self.fail("TODO const int bigger than ptr and signed int", .{});
+ if (info.bits <= 64) {
+ const unsigned = switch (info.signedness) {
+ .signed => blk: {
+ const signed = typed_value.val.toSignedInt();
+ break :blk @bitCast(u64, signed);
+ },
+ .unsigned => typed_value.val.toUnsignedInt(),
+ };
+
+ return MCValue{ .immediate = unsigned };
+ } else {
+ return self.lowerUnnamedConst(typed_value);
}
- return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
},
.Bool => {
return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
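
For signed constants the value is sign-extended to 64 bits and then reinterpreted, so the immediate carries the bit pattern the hardware expects; e.g. an i64 of -1 becomes 0xffff_ffff_ffff_ffff. A one-line illustration in the same style (hypothetical test, not part of the patch):

    const std = @import("std");

    test "signed constant becomes a sign-extended immediate" {
        try std.testing.expectEqual(@as(u64, 0xffff_ffff_ffff_ffff), @bitCast(u64, @as(i64, -1)));
    }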
@@ -3875,7 +4015,7 @@ fn parseRegName(name: []const u8) ?Register {
return std.meta.stringToEnum(Register, name);
}
-fn registerAlias(reg: Register, size_bytes: u32) Register {
+fn registerAlias(reg: Register, size_bytes: u64) Register {
if (size_bytes == 0) {
unreachable; // should be comptime known
} else if (size_bytes <= 4) {
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index d98aa09e56..84bb559824 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -95,6 +95,8 @@ pub fn emitMir(
.call_extern => try emit.mirCallExtern(inst),
+ .eor_immediate => try emit.mirLogicalImmediate(inst),
+
.add_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.cmp_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.sub_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
@@ -106,7 +108,9 @@ pub fn emitMir(
.dbg_prologue_end => try emit.mirDebugPrologueEnd(),
.dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(),
+ .and_shifted_register => try emit.mirLogicalShiftedRegister(inst),
.eor_shifted_register => try emit.mirLogicalShiftedRegister(inst),
+ .orr_shifted_register => try emit.mirLogicalShiftedRegister(inst),
.load_memory_got => try emit.mirLoadMemoryPie(inst),
.load_memory_direct => try emit.mirLoadMemoryPie(inst),
@@ -605,6 +609,21 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
+fn mirLogicalImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ const rr_bitmask = emit.mir.instructions.items(.data)[inst].rr_bitmask;
+ const rd = rr_bitmask.rd;
+ const rn = rr_bitmask.rn;
+ const imms = rr_bitmask.imms;
+ const immr = rr_bitmask.immr;
+ const n = rr_bitmask.n;
+
+ switch (tag) {
+ .eor_immediate => try emit.writeInstruction(Instruction.eorImmediate(rd, rn, imms, immr, n)),
+ else => unreachable,
+ }
+}
+
fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift;
@@ -643,7 +662,9 @@ fn mirLogicalShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const imm6 = rrr_imm6_logical_shift.imm6;
switch (tag) {
- .eor_shifted_register => try emit.writeInstruction(Instruction.eor(rd, rn, rm, shift, imm6)),
+ .and_shifted_register => try emit.writeInstruction(Instruction.andShiftedRegister(rd, rn, rm, shift, imm6)),
+ .eor_shifted_register => try emit.writeInstruction(Instruction.eorShiftedRegister(rd, rn, rm, shift, imm6)),
+ .orr_shifted_register => try emit.writeInstruction(Instruction.orrShiftedRegister(rd, rn, rm, shift, imm6)),
else => unreachable,
}
}
@@ -844,7 +865,7 @@ fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.mov_register => {
const rr = emit.mir.instructions.items(.data)[inst].rr;
- try emit.writeInstruction(Instruction.orr(rr.rd, .xzr, rr.rn, .lsl, 0));
+ try emit.writeInstruction(Instruction.orrShiftedRegister(rr.rd, .xzr, rr.rn, .lsl, 0));
},
.mov_to_from_sp => {
const rr = emit.mir.instructions.items(.data)[inst].rr;
@@ -852,7 +873,7 @@ fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
},
.mvn => {
const rr_imm6_shift = emit.mir.instructions.items(.data)[inst].rr_imm6_shift;
- try emit.writeInstruction(Instruction.orn(rr_imm6_shift.rd, .xzr, rr_imm6_shift.rm, .lsl, 0));
+ try emit.writeInstruction(Instruction.ornShiftedRegister(rr_imm6_shift.rd, .xzr, rr_imm6_shift.rm, rr_imm6_shift.shift, rr_imm6_shift.imm6));
},
else => unreachable,
}
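
Both rewritten calls rely on standard A64 aliases: mov rd, rn is orr rd, xzr, rn, and mvn rd, rm is orn rd, xzr, rm — OR against the zero register passes the (possibly inverted) operand through unchanged. The mvn case also now takes the shift type and amount from the Mir payload instead of hard-coding lsl #0. As an identity sketch:

    // mov xd, xn          ==  orr xd, xzr, xn           (xd = 0 | xn)
    // mvn xd, xm, lsl #s  ==  orn xd, xzr, xm, lsl #s   (xd = 0 | ~(xm << s))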
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index c2ffbad422..9d5837f2f5 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -28,6 +28,8 @@ pub const Inst = struct {
add_immediate,
/// Add (shifted register)
add_shifted_register,
+ /// Bitwise AND (shifted register)
+ and_shifted_register,
/// Branch conditionally
b_cond,
/// Branch
@@ -54,6 +56,8 @@ pub const Inst = struct {
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
+ /// Bitwise Exclusive OR (immediate)
+ eor_immediate,
/// Bitwise Exclusive OR (shifted register)
eor_shifted_register,
/// Loads the contents into a register
@@ -106,6 +110,8 @@ pub const Inst = struct {
mvn,
/// No Operation
nop,
+ /// Bitwise inclusive OR (shifted register)
+ orr_shifted_register,
/// Pseudo-instruction: Pop multiple registers
pop_regs,
/// Pseudo-instruction: Push multiple registers
@@ -231,14 +237,25 @@ pub const Inst = struct {
imm12: u12,
sh: u1 = 0,
},
- /// Two registers and a shift (shift type and 6-bit amount)
+ /// Two registers and a shift (logical instruction version)
+ /// (shift type and 6-bit amount)
///
/// Used by e.g. mvn
rr_imm6_shift: struct {
rd: Register,
rm: Register,
imm6: u6,
- shift: bits.Instruction.AddSubtractShiftedRegisterShift,
+ shift: bits.Instruction.LogicalShiftedRegisterShift,
+ },
+ /// Two registers and a bitmask immediate
+ ///
+ /// Used by e.g. eor_immediate
+ rr_bitmask: struct {
+ rd: Register,
+ rn: Register,
+ imms: u6,
+ immr: u6,
+ n: u1,
},
/// Two registers
///
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
index c427f86472..9e5ad30701 100644
--- a/src/arch/aarch64/bits.zig
+++ b/src/arch/aarch64/bits.zig
@@ -323,6 +323,16 @@ pub const Instruction = union(enum) {
op: u1,
sf: u1,
},
+ logical_immediate: packed struct {
+ rd: u5,
+ rn: u5,
+ imms: u6,
+ immr: u6,
+ n: u1,
+ fixed: u6 = 0b100100,
+ opc: u2,
+ sf: u1,
+ },
add_subtract_shifted_register: packed struct {
rd: u5,
rn: u5,
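
Zig packed structs allocate fields starting at the least-significant bit, so the field order in logical_immediate above (rd first) places rd in bits 4:0 and sf in bit 31, reproducing the A64 "Logical (immediate)" encoding group (a sanity note, not part of the patch):

    //  31 | 30:29 | 28:23  | 22 | 21:16 | 15:10 | 9:5 | 4:0
    //  sf |  opc  | 100100 |  n | immr  | imms  | rn  | rd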
@@ -487,6 +497,7 @@ pub const Instruction = union(enum) {
.no_operation => |v| @bitCast(u32, v),
.logical_shifted_register => |v| @bitCast(u32, v),
.add_subtract_immediate => |v| @bitCast(u32, v),
+ .logical_immediate => |v| @bitCast(u32, v),
.add_subtract_shifted_register => |v| @bitCast(u32, v),
// TODO once packed structs work, this can be refactored
.conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
@@ -900,6 +911,31 @@ pub const Instruction = union(enum) {
};
}
+ fn logicalImmediate(
+ opc: u2,
+ rd: Register,
+ rn: Register,
+ imms: u6,
+ immr: u6,
+ n: u1,
+ ) Instruction {
+ return Instruction{
+ .logical_immediate = .{
+ .rd = rd.enc(),
+ .rn = rn.enc(),
+ .imms = imms,
+ .immr = immr,
+ .n = n,
+ .opc = opc,
+ .sf = switch (rd.size()) {
+ 32 => 0b0,
+ 64 => 0b1,
+ else => unreachable, // unexpected register size
+ },
+ },
+ };
+ }
+
pub const AddSubtractShiftedRegisterShift = enum(u2) { lsl, lsr, asr, _ };
fn addSubtractShiftedRegister(
@@ -1173,7 +1209,7 @@ pub const Instruction = union(enum) {
// Logical (shifted register)
- pub fn @"and"(
+ pub fn andShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@@ -1183,7 +1219,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b00, 0b0, rd, rn, rm, shift, amount);
}
- pub fn bic(
+ pub fn bicShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@@ -1193,7 +1229,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b00, 0b1, rd, rn, rm, shift, amount);
}
- pub fn orr(
+ pub fn orrShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@@ -1203,7 +1239,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b01, 0b0, rd, rn, rm, shift, amount);
}
- pub fn orn(
+ pub fn ornShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@@ -1213,7 +1249,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b01, 0b1, rd, rn, rm, shift, amount);
}
- pub fn eor(
+ pub fn eorShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@@ -1223,7 +1259,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b10, 0b0, rd, rn, rm, shift, amount);
}
- pub fn eon(
+ pub fn eonShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@@ -1233,7 +1269,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b10, 0b1, rd, rn, rm, shift, amount);
}
- pub fn ands(
+ pub fn andsShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@@ -1243,7 +1279,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b11, 0b0, rd, rn, rm, shift, amount);
}
- pub fn bics(
+ pub fn bicsShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@@ -1271,6 +1307,24 @@ pub const Instruction = union(enum) {
return addSubtractImmediate(0b1, 0b1, rd, rn, imm, shift);
}
+ // Logical (immediate)
+
+ pub fn andImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
+ return logicalImmediate(0b00, rd, rn, imms, immr, n);
+ }
+
+ pub fn orrImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
+ return logicalImmediate(0b01, rd, rn, imms, immr, n);
+ }
+
+ pub fn eorImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
+ return logicalImmediate(0b10, rd, rn, imms, immr, n);
+ }
+
+ pub fn andsImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
+ return logicalImmediate(0b11, rd, rn, imms, immr, n);
+ }
+
// Add/subtract (shifted register)
pub fn addShiftedRegister(
@@ -1378,11 +1432,11 @@ test "serialize instructions" {
const testcases = [_]Testcase{
.{ // orr x0, xzr, x1
- .inst = Instruction.orr(.x0, .xzr, .x1, .lsl, 0),
+ .inst = Instruction.orrShiftedRegister(.x0, .xzr, .x1, .lsl, 0),
.expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
},
.{ // orn x0, xzr, x1
- .inst = Instruction.orn(.x0, .xzr, .x1, .lsl, 0),
+ .inst = Instruction.ornShiftedRegister(.x0, .xzr, .x1, .lsl, 0),
.expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
},
.{ // movz x1, #4
@@ -1502,11 +1556,11 @@ test "serialize instructions" {
.expected = 0b10_101_0_001_1_0000010_00010_11111_00001,
},
.{ // and x0, x4, x2
- .inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0),
+ .inst = Instruction.andShiftedRegister(.x0, .x4, .x2, .lsl, 0),
.expected = 0b1_00_01010_00_0_00010_000000_00100_00000,
},
.{ // and x0, x4, x2, lsl #0x8
- .inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0x8),
+ .inst = Instruction.andShiftedRegister(.x0, .x4, .x2, .lsl, 0x8),
.expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
},
.{ // add x0, x10, #10
@@ -1537,6 +1591,10 @@ test "serialize instructions" {
.inst = Instruction.mul(.x1, .x4, .x9),
.expected = 0b1_00_11011_000_01001_0_11111_00100_00001,
},
+ .{ // eor x3, x5, #1
+ .inst = Instruction.eorImmediate(.x3, .x5, 0b000000, 0b000000, 0b1),
+ .expected = 0b1_10_100100_1_000000_000000_00101_00011,
+ },
};
for (testcases) |case| {
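
One more worked example of the bitmask encoding, beyond the test case added above (hypothetical, not part of the commit's test set): a 64-bit and with #0xff is a run of eight ones (imms = 0b000111), no rotation (immr = 0), at element size 64 (n = 1):

    .{ // and x0, x1, #0xff
        .inst = Instruction.andImmediate(.x0, .x1, 0b000111, 0b000000, 0b1),
        .expected = 0b1_00_100100_1_000000_000111_00001_00000,
    },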