author     Jakub Konka <kubkon@jakubkonka.com>  2022-05-16 19:45:30 +0200
committer  Jakub Konka <kubkon@jakubkonka.com>  2022-05-19 19:37:29 +0200
commit     9e5c8cb008f75c2e570a0e48d5d014e936c103ca (patch)
tree       af9bf9c8457a4137b1fc534e7eac8a0a4400b8ec /src/arch
parent     2aee2302515ba444999b82c2e40cbc35dee08baf (diff)
x64: merge general purpose and SIMD registers into one bitset
This way, we do not have to tweak the `RegisterManager` to handle multiple register types: we have one linear space instead. Additionally, the bitset itself separates the registers into overlapping classes (aliases of differing bit widths) and non-overlapping ones (for example, AVX registers do not overlap general purpose registers, so the two can be allocated simultaneously). Another big benefit of this simple approach is that we can still refer to *all* registers, regardless of class, via enum literals, which keeps the code readable. Finally, `RegisterLock` is universal across register classes.
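
The core idea reads well in miniature. The following is a minimal sketch of the merged-bitset scheme, not the actual `RegisterManager` from this commit; the register subset and the `allocIndex` helper are illustrative only. General purpose and SIMD registers live in one enum, a single bitset tracks allocation for both classes, and aliases of differing bit widths collapse onto the same bit:

    const std = @import("std");

    // Illustrative merged register enum: GP and SIMD registers share one
    // linear index space and are all reachable via enum literals such as
    // `.rax` or `.xmm0`.
    const Register = enum(u8) {
        rax, rcx, rdx, rbx, // general purpose
        eax, ecx, edx, ebx, // 32-bit aliases of the above
        xmm0, xmm1, ymm0, ymm1, // SIMD; ymmN aliases xmmN

        // Aliases of differing bit widths map to one allocation index, so
        // locking `.eax` also locks `.rax`, and `.ymm0` likewise `.xmm0`.
        fn allocIndex(reg: Register) usize {
            return switch (reg) {
                .rax, .eax => 0,
                .rcx, .ecx => 1,
                .rdx, .edx => 2,
                .rbx, .ebx => 3,
                .xmm0, .ymm0 => 4,
                .xmm1, .ymm1 => 5,
            };
        }
    };

    // One bitset tracks every allocatable register, regardless of class.
    const RegisterManager = struct {
        allocated: std.StaticBitSet(6) = std.StaticBitSet(6).initEmpty(),

        fn tryAllocReg(self: *RegisterManager, reg: Register) bool {
            const i = reg.allocIndex();
            if (self.allocated.isSet(i)) return false;
            self.allocated.set(i);
            return true;
        }
    };

    test "GP and SIMD registers do not overlap" {
        var mgr = RegisterManager{};
        // .xmm0 does not alias .rax, so both can be held at once.
        try std.testing.expect(mgr.tryAllocReg(.rax));
        try std.testing.expect(mgr.tryAllocReg(.xmm0));
        // .ymm0 aliases .xmm0 and is therefore already taken.
        try std.testing.expect(!mgr.tryAllocReg(.ymm0));
    }

Since every register is one bit in the same set, a lock taken through `RegisterLock` works identically whether the bit belongs to a GP or a SIMD register.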
Diffstat (limited to 'src/arch')
-rw-r--r--  src/arch/x86_64/CodeGen.zig  1060
-rw-r--r--  src/arch/x86_64/Emit.zig      772
-rw-r--r--  src/arch/x86_64/Mir.zig        66
-rw-r--r--  src/arch/x86_64/abi.zig        17
-rw-r--r--  src/arch/x86_64/bits.zig      100
5 files changed, 891 insertions, 1124 deletions
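
Most of the churn below is one mechanical substitution: every `(Mir.Ops(Register, Register){ ... }).encode()` call becomes `Mir.Inst.Ops.encode(.{ ... })`. The generic parameters disappear because there is only one `Register` type left. A rough sketch of the shape this enables follows; the field widths and the `encode` signature here are hypothetical, not the actual layout in `src/arch/x86_64/Mir.zig`:

    const Register = enum(u7) { rax, rcx, rdx, rbp, rsp, xmm0, xmm1, none = 0x7f };

    // Hypothetical packed operand word: with a single merged register
    // enum, `Ops` needs no comptime register-type parameters.
    const Ops = packed struct {
        reg1: u7,
        reg2: u7,
        flags: u2,

        fn encode(args: struct {
            reg1: Register = .none,
            reg2: Register = .none,
            flags: u2 = 0b00,
        }) Ops {
            return .{
                .reg1 = @enumToInt(args.reg1),
                .reg2 = @enumToInt(args.reg2),
                .flags = args.flags,
            };
        }
    };

Call sites then shrink to a single expression, e.g. `.ops = Ops.encode(.{ .reg1 = .rbp })`, which is exactly the pattern the diff introduces.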
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index e81f2d5435..aa6d04d5d9 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -35,14 +35,10 @@ const caller_preserved_regs = abi.caller_preserved_regs;
const allocatable_registers = abi.allocatable_registers;
const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
-const RegisterManager = RegisterManagerFn(Self, Register, &allocatable_registers, spillInstruction);
+const RegisterManager = RegisterManagerFn(Self, Register, &allocatable_registers);
const RegisterLock = RegisterManager.RegisterLock;
const Register = bits.Register;
-const AvxRegisterManager = RegisterManagerFn(Self, AvxRegister, &abi.avx_regs, spillInstructionAvx);
-const AvxRegisterLock = AvxRegisterManager.RegisterLock;
-const AvxRegister = bits.AvxRegister;
-
const InnerError = error{
OutOfMemory,
CodegenFail,
@@ -92,8 +88,7 @@ branch_stack: *std.ArrayList(Branch),
// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
-register_manager: RegisterManager,
-avx_register_manager: AvxRegisterManager,
+register_manager: RegisterManager = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
@@ -133,8 +128,6 @@ pub const MCValue = union(enum) {
/// The value is a tuple { wrapped, overflow } where wrapped value is stored in the GP register,
/// and the operation is a signed operation.
register_overflow_signed: Register,
- /// The value is in an AVX register.
- avx_register: AvxRegister,
/// The value is in memory at a hard-coded address.
/// If the type is a pointer, it means the pointer address is at this memory location.
memory: u64,
@@ -202,7 +195,6 @@ pub const MCValue = union(enum) {
fn isRegister(mcv: MCValue) bool {
return switch (mcv) {
.register => true,
- .avx_register => true,
else => false,
};
}
@@ -304,11 +296,7 @@ pub fn generate(
.mir_to_air_map = if (builtin.mode == .Debug)
std.AutoHashMap(Mir.Inst.Index, Air.Inst.Index).init(bin_file.allocator)
else {},
- .register_manager = undefined,
- .avx_register_manager = undefined,
};
- function.register_manager = .{ .function = &function };
- function.avx_register_manager = .{ .function = &function };
defer function.stack.deinit(bin_file.allocator);
defer function.blocks.deinit(bin_file.allocator);
defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
@@ -400,17 +388,15 @@ fn gen(self: *Self) InnerError!void {
if (cc != .Naked) {
_ = try self.addInst(.{
.tag = .push,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rbp,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
.data = undefined, // unused for push reg,
});
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rbp,
.reg2 = .rsp,
- }).encode(),
+ }),
.data = undefined,
});
// We want to subtract the aligned stack frame size from rsp here, but we don't
@@ -447,9 +433,7 @@ fn gen(self: *Self) InnerError!void {
// push the callee_preserved_regs that were used
const backpatch_push_callee_preserved_regs_i = try self.addInst(.{
.tag = .push_regs_from_callee_preserved_regs,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rbp,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
.data = .{ .payload = undefined }, // to be backpatched
});
@@ -489,9 +473,7 @@ fn gen(self: *Self) InnerError!void {
// pop the callee_preserved_regs
_ = try self.addInst(.{
.tag = .pop_regs_from_callee_preserved_regs,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rbp,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
.data = .{ .payload = callee_preserved_regs_payload },
});
@@ -510,17 +492,13 @@ fn gen(self: *Self) InnerError!void {
_ = try self.addInst(.{
.tag = .pop,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rbp,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
.data = undefined,
});
_ = try self.addInst(.{
.tag = .ret,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b11,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .flags = 0b11 }),
.data = undefined,
});
@@ -534,16 +512,12 @@ fn gen(self: *Self) InnerError!void {
if (aligned_stack_end > 0) {
self.mir_instructions.set(backpatch_stack_sub, .{
.tag = .sub,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rsp,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
.data = .{ .imm = aligned_stack_end },
});
self.mir_instructions.set(backpatch_stack_add, .{
.tag = .add,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rsp,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
.data = .{ .imm = aligned_stack_end },
});
}
@@ -908,8 +882,8 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
// TODO check if AVX available
const ptr_bytes: u64 = 32;
if (abi_size <= ptr_bytes) {
- if (self.avx_register_manager.tryAllocReg(inst)) |reg| {
- return MCValue{ .avx_register = avxRegisterAlias(reg, abi_size) };
+ if (self.register_manager.tryAllocReg(inst)) |reg| {
+ return MCValue{ .register = registerAlias(reg, abi_size) };
}
}
},
@@ -947,21 +921,6 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv, .{});
}
-pub fn spillInstructionAvx(self: *Self, reg: AvxRegister, inst: Air.Inst.Index) !void {
- const stack_mcv = try self.allocRegOrMem(inst, false);
- log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv });
- const reg_mcv = self.getResolvedInstValue(inst);
- switch (reg_mcv) {
- .avx_register => |other| {
- assert(reg.to256() == other.to256());
- },
- else => {},
- }
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- try branch.inst_table.put(self.gpa, inst, stack_mcv);
- try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv, .{});
-}
-
pub fn spillCompareFlagsIfOccupied(self: *Self) !void {
if (self.compare_flags_inst) |inst_to_save| {
const mcv = self.getResolvedInstValue(inst_to_save);
@@ -972,7 +931,6 @@ pub fn spillCompareFlagsIfOccupied(self: *Self) !void {
.compare_flags_signed,
.compare_flags_unsigned,
=> try self.allocRegOrMem(inst_to_save, true),
- .avx_register => try self.allocRegOrMem(inst_to_save, false),
else => unreachable,
};
@@ -990,7 +948,6 @@ pub fn spillCompareFlagsIfOccupied(self: *Self) !void {
.register_overflow_signed,
.register_overflow_unsigned,
=> |reg| self.register_manager.freeReg(reg),
- .avx_register => |reg| self.avx_register_manager.freeReg(reg),
else => {},
}
}
@@ -1236,10 +1193,10 @@ fn airMin(self: *Self, inst: Air.Inst.Index) !void {
const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ty, rhs_mcv);
_ = try self.addInst(.{
.tag = if (signedness == .signed) .cond_mov_lt else .cond_mov_below,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = dst_mcv.register,
.reg2 = lhs_reg,
- }).encode(),
+ }),
.data = undefined,
});
@@ -1440,10 +1397,10 @@ fn genSetStackTruncatedOverflowCompare(
};
_ = try self.addInst(.{
.tag = .cond_set_byte_overflow,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = overflow_reg.to8(),
.flags = flags,
- }).encode(),
+ }),
.data = undefined,
});
@@ -1460,10 +1417,7 @@ fn genSetStackTruncatedOverflowCompare(
const eq_reg = temp_regs[2];
_ = try self.addInst(.{
.tag = .cond_set_byte_eq_ne,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = eq_reg.to8(),
- .flags = 0b00,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = eq_reg.to8() }),
.data = undefined,
});
@@ -1609,19 +1563,17 @@ fn genIntMulDivOpMir(
.signed => {
_ = try self.addInst(.{
.tag = .cwd,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b11,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .flags = 0b11 }),
.data = undefined,
});
},
.unsigned => {
_ = try self.addInst(.{
.tag = .xor,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rdx,
.reg2 = .rdx,
- }).encode(),
+ }),
.data = undefined,
});
},
@@ -1640,16 +1592,14 @@ fn genIntMulDivOpMir(
.register => |reg| {
_ = try self.addInst(.{
.tag = tag,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = reg,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
.data = undefined,
});
},
.stack_offset => |off| {
_ = try self.addInst(.{
.tag = tag,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg2 = .rbp,
.flags = switch (abi_size) {
1 => 0b00,
@@ -1658,7 +1608,7 @@ fn genIntMulDivOpMir(
8 => 0b11,
else => unreachable,
},
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
@@ -1691,34 +1641,34 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
_ = try self.addInst(.{
.tag = .xor,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = divisor.to64(),
.reg2 = dividend.to64(),
- }).encode(),
+ }),
.data = undefined,
});
_ = try self.addInst(.{
.tag = .sar,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = divisor.to64(),
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = 63 },
});
_ = try self.addInst(.{
.tag = .@"test",
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rdx,
.reg2 = .rdx,
- }).encode(),
+ }),
.data = undefined,
});
_ = try self.addInst(.{
.tag = .cond_mov_eq,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = divisor.to64(),
.reg2 = .rdx,
- }).encode(),
+ }),
.data = undefined,
});
try self.genBinOpMir(.add, Type.isize, .{ .register = divisor }, .{ .register = .rax });
@@ -2102,11 +2052,11 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
// mov reg, [rbp - 8]
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = addr_reg.to64(),
.reg2 = .rbp,
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -@intCast(i32, off)) },
});
},
@@ -2187,10 +2137,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
// lea reg, [rbp]
_ = try self.addInst(.{
.tag = .lea,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = addr_reg.to64(),
.reg2 = .rbp,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
@@ -2198,10 +2148,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
// lea reg, [rbp]
_ = try self.addInst(.{
.tag = .lea,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = addr_reg.to64(),
.reg2 = .rbp,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
@@ -2266,11 +2216,11 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
// mov dst_mcv, [dst_mcv]
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b01,
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)),
.reg2 = dst_mcv.register,
- }).encode(),
+ .flags = 0b01,
+ }),
.data = .{ .imm = 0 },
});
break :result .{ .register = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)) };
@@ -2532,11 +2482,11 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
// mov dst_reg, [reg]
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(dst_reg, @intCast(u32, abi_size)),
.reg2 = reg,
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
},
@@ -2552,9 +2502,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
else => return self.fail("TODO implement loading from register into {}", .{dst_mcv}),
}
},
- .avx_register => {
- return self.fail("TODO load for AVX register", .{});
- },
.memory,
.got_load,
.direct_load,
@@ -2606,10 +2553,10 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
_ = try self.addInst(.{
.tag = .lea_pie,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(reg, abi_size),
.flags = flags,
- }).encode(),
+ }),
.data = .{
.load_reloc = .{
.atom_index = fn_owner_decl.link.macho.local_sym_index,
@@ -2670,7 +2617,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
});
_ = try self.addInst(.{
.tag = .mov_mem_imm,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = reg.to64(),
.flags = switch (abi_size) {
1 => 0b00,
@@ -2678,7 +2625,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
4 => 0b10,
else => unreachable,
},
- }).encode(),
+ }),
.data = .{ .payload = payload },
});
},
@@ -2692,11 +2639,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
const tmp_reg = try self.copyToTmpRegister(value_ty, value);
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = reg.to64(),
.reg2 = tmp_reg.to64(),
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
},
@@ -2708,11 +2655,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.register => |src_reg| {
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = reg.to64(),
.reg2 = registerAlias(src_reg, @intCast(u32, abi_size)),
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
},
@@ -2736,9 +2683,6 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
},
}
},
- .avx_register => {
- return self.fail("TODO store for AVX register", .{});
- },
.got_load,
.direct_load,
.memory,
@@ -2759,11 +2703,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
// mov reg, [reg]
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = addr_reg.to64(),
.reg2 = addr_reg.to64(),
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
@@ -2798,21 +2742,21 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
}
_ = try self.addInst(.{
.tag = .mov_mem_imm,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = addr_reg.to64(),
.flags = flags,
- }).encode(),
+ }),
.data = .{ .payload = payload },
});
},
.register => |reg| {
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = addr_reg.to64(),
.reg2 = reg,
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
},
@@ -2829,20 +2773,20 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = tmp_reg,
.reg2 = tmp_reg,
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = addr_reg.to64(),
.reg2 = tmp_reg,
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
return;
@@ -2856,11 +2800,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
const tmp_reg = try self.copyToTmpRegister(value_ty, value);
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = addr_reg.to64(),
.reg2 = tmp_reg,
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
return;
@@ -3017,10 +2961,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
if (signedness == .signed and field_size < 8) {
_ = try self.addInst(.{
.tag = .mov_sign_extend,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = dst_mcv.register,
.reg2 = registerAlias(dst_mcv.register, field_size),
- }).encode(),
+ }),
.data = undefined,
});
}
@@ -3048,10 +2992,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
};
_ = try self.addInst(.{
.tag = .cond_set_byte_overflow,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = dst_reg.to8(),
.flags = flags,
- }).encode(),
+ }),
.data = undefined,
});
break :result MCValue{ .register = dst_reg.to8() };
@@ -3092,10 +3036,7 @@ fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shi
1 => {
_ = try self.addInst(.{
.tag = tag,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = registerAlias(reg, abi_size),
- .flags = 0b00,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }),
.data = undefined,
});
return;
@@ -3103,10 +3044,10 @@ fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shi
else => {
_ = try self.addInst(.{
.tag = tag,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(reg, abi_size),
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = @intCast(u8, imm) },
});
return;
@@ -3124,10 +3065,10 @@ fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shi
_ = try self.addInst(.{
.tag = tag,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(reg, abi_size),
.flags = 0b01,
- }).encode(),
+ }),
.data = undefined,
});
}
@@ -3383,32 +3324,6 @@ fn genBinOp(
return self.fail("TODO implement genBinOp for {}", .{lhs_ty.fmtDebug()});
}
- if (lhs_ty.zigTypeTag() == .Float) {
- switch (tag) {
- .add => {
- const dst_reg: AvxRegister = blk: {
- const reg = try self.avx_register_manager.allocReg(null);
- try self.genSetAvxReg(lhs_ty, reg, lhs);
- break :blk reg.to128();
- };
- const dst_lock = self.avx_register_manager.lockRegAssumeUnused(dst_reg);
- defer self.avx_register_manager.unlockReg(dst_lock);
-
- const src_reg: AvxRegister = blk: {
- const reg = try self.avx_register_manager.allocReg(null);
- try self.genSetAvxReg(lhs_ty, reg, rhs);
- break :blk reg.to128();
- };
- const src_lock = self.avx_register_manager.lockRegAssumeUnused(src_reg);
- defer self.avx_register_manager.unlockReg(src_lock);
-
- try self.genBinOpMir(.add_f64, lhs_ty, .{ .avx_register = dst_reg }, .{ .avx_register = src_reg });
- return MCValue{ .avx_register = dst_reg };
- },
- else => unreachable,
- }
- }
-
const is_commutative: bool = switch (tag) {
.add,
.addwrap,
@@ -3526,25 +3441,39 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
const reg = try self.copyToTmpRegister(dst_ty, src_mcv);
return self.genBinOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg });
},
- .register => |src_reg| {
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = registerAlias(src_reg, abi_size),
- }).encode(),
- .data = undefined,
- });
- },
- .avx_register => {
- return self.fail("TODO genBinOp for AVX register", .{});
+ .register => |src_reg| switch (dst_ty.zigTypeTag()) {
+ .Float => switch (dst_ty.tag()) {
+ .f64 => {
+ _ = try self.addInst(.{
+ .tag = switch (mir_tag) {
+ .add => .add_f64,
+ .cmp => .cmp_f64,
+ else => return self.fail("TODO genBinOpMir for f64 register-register with MIR tag {}", .{mir_tag}),
+ },
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = dst_reg.to128(),
+ .reg2 = src_reg.to128(),
+ }),
+ .data = undefined,
+ });
+ },
+ else => return self.fail("TODO genBinOpMir for float register-register and type {}", .{dst_ty.fmtDebug()}),
+ },
+ else => {
+ _ = try self.addInst(.{
+ .tag = mir_tag,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = registerAlias(dst_reg, abi_size),
+ .reg2 = registerAlias(src_reg, abi_size),
+ }),
+ .data = undefined,
+ });
+ },
},
.immediate => |imm| {
_ = try self.addInst(.{
.tag = mir_tag,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = registerAlias(dst_reg, abi_size),
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size) }),
.data = .{ .imm = @truncate(u32, imm) },
});
},
@@ -3567,39 +3496,16 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
}
_ = try self.addInst(.{
.tag = mir_tag,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(dst_reg, abi_size),
.reg2 = .rbp,
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
}
},
- .avx_register => |dst_reg| {
- switch (src_mcv) {
- .avx_register => |src_reg| {
- switch (dst_ty.zigTypeTag()) {
- .Float => switch (dst_ty.tag()) {
- .f64 => {
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = (Mir.Ops(AvxRegister, AvxRegister){
- .reg1 = dst_reg.to128(),
- .reg2 = src_reg.to128(),
- }).encode(),
- .data = undefined,
- });
- },
- else => return self.fail("TODO genBinOp for AVX register and type {}", .{dst_ty.fmtDebug()}),
- },
- else => return self.fail("TODO genBinOp for AVX register and type {}", .{dst_ty.fmtDebug()}),
- }
- },
- else => return self.fail("TODO genBinOp for AVX register", .{}),
- }
- },
.ptr_stack_offset, .stack_offset => |off| {
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
@@ -3617,17 +3523,14 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
.register => |src_reg| {
_ = try self.addInst(.{
.tag = mir_tag,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rbp,
.reg2 = registerAlias(src_reg, abi_size),
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
- .avx_register => {
- return self.fail("TODO genBinOp for AVX register", .{});
- },
.immediate => |imm| {
const tag: Mir.Inst.Tag = switch (mir_tag) {
.add => .add_mem_imm,
@@ -3651,10 +3554,10 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
});
_ = try self.addInst(.{
.tag = tag,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rbp,
.flags = flags,
- }).encode(),
+ }),
.data = .{ .payload = payload },
});
},
@@ -3697,7 +3600,6 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.ptr_stack_offset => unreachable,
.register_overflow_unsigned => unreachable,
.register_overflow_signed => unreachable,
- .avx_register => unreachable,
.register => |dst_reg| {
switch (src_mcv) {
.none => unreachable,
@@ -3706,15 +3608,14 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.ptr_stack_offset => unreachable,
.register_overflow_unsigned => unreachable,
.register_overflow_signed => unreachable,
- .avx_register => unreachable,
.register => |src_reg| {
// register, register
_ = try self.addInst(.{
.tag = .imul_complex,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(dst_reg, abi_size),
.reg2 = registerAlias(src_reg, abi_size),
- }).encode(),
+ }),
.data = undefined,
});
},
@@ -3724,11 +3625,11 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
if (math.minInt(i32) <= imm and imm <= math.maxInt(i32)) {
_ = try self.addInst(.{
.tag = .imul_complex,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = dst_reg.to32(),
.reg2 = dst_reg.to32(),
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = @truncate(u32, imm) },
});
} else {
@@ -3740,11 +3641,11 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.stack_offset => |off| {
_ = try self.addInst(.{
.tag = .imul_complex,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(dst_reg, abi_size),
.reg2 = .rbp,
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
@@ -3770,7 +3671,6 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.ptr_stack_offset => unreachable,
.register_overflow_unsigned => unreachable,
.register_overflow_signed => unreachable,
- .avx_register => unreachable,
.register => |src_reg| {
// copy dst to a register
const dst_reg = try self.copyToTmpRegister(dst_ty, dst_mcv);
@@ -3778,10 +3678,10 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
// register, register
_ = try self.addInst(.{
.tag = .imul_complex,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(dst_reg, abi_size),
.reg2 = registerAlias(src_reg, abi_size),
- }).encode(),
+ }),
.data = undefined,
});
// copy dst_reg back out
@@ -3888,9 +3788,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .interrupt,
- .ops = (Mir.Ops{
- .flags = 0b00,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{}),
.data = undefined,
});
return self.finishAirBookkeeping();
@@ -3973,9 +3871,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
.ptr_stack_offset => {
return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
},
- .avx_register => {
- return self.fail("TODO implement calling with MCValue.avx_register arg", .{});
- },
.undef => unreachable,
.immediate => unreachable,
.unreach => unreachable,
@@ -3994,9 +3889,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// Adjust the stack
_ = try self.addInst(.{
.tag = .sub,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rsp,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
.data = .{ .imm = info.stack_byte_count },
});
}
@@ -4020,9 +3913,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
unreachable;
_ = try self.addInst(.{
.tag = .call,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b01,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = @truncate(u32, got_addr) },
});
} else if (func_value.castTag(.extern_fn)) |_| {
@@ -4036,10 +3927,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
- }).encode(),
+ }),
.data = undefined,
});
}
@@ -4054,10 +3945,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// callq *%rax
_ = try self.addInst(.{
.tag = .call,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
- }).encode(),
+ }),
.data = undefined,
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
@@ -4089,10 +3980,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
- }).encode(),
+ }),
.data = undefined,
});
}
@@ -4107,9 +3998,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b01,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = @intCast(u32, fn_got_addr) },
});
} else return self.fail("TODO implement calling extern fn on plan9", .{});
@@ -4119,10 +4008,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
- }).encode(),
+ }),
.data = undefined,
});
}
@@ -4132,9 +4021,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// Readjust the stack
_ = try self.addInst(.{
.tag = .add,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rsp,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
.data = .{ .imm = info.stack_byte_count },
});
}
@@ -4192,9 +4079,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
// which is available if the jump is 127 bytes or less forward.
const jmp_reloc = try self.addInst(.{
.tag = .jmp,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b00,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst = undefined },
});
try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
@@ -4227,9 +4112,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// which is available if the jump is 127 bytes or less forward.
const jmp_reloc = try self.addInst(.{
.tag = .jmp,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b00,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst = undefined },
});
try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
@@ -4256,37 +4139,6 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
self.compare_flags_inst = inst;
const result: MCValue = result: {
- if (ty.zigTypeTag() == .Float) {
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
-
- const dst_reg: AvxRegister = blk: {
- const reg = try self.avx_register_manager.allocReg(null);
- try self.genSetAvxReg(ty, reg, lhs);
- break :blk reg.to128();
- };
- const dst_lock = self.avx_register_manager.lockRegAssumeUnused(dst_reg);
- defer self.avx_register_manager.unlockReg(dst_lock);
-
- const src_reg: AvxRegister = blk: {
- const reg = try self.avx_register_manager.allocReg(null);
- try self.genSetAvxReg(ty, reg, rhs);
- break :blk reg.to128();
- };
- const src_lock = self.avx_register_manager.lockRegAssumeUnused(src_reg);
- defer self.avx_register_manager.unlockReg(src_lock);
-
- _ = try self.addInst(.{
- .tag = .cmp_f64,
- .ops = (Mir.Ops(AvxRegister, AvxRegister){
- .reg1 = dst_reg,
- .reg2 = src_reg,
- }).encode(),
- .data = undefined,
- });
-
- break :result MCValue{ .compare_flags_unsigned = op };
- }
// There are 2 operands, destination and source.
// Either one, but not both, can be a memory operand.
// Source operand can be an immediate, 8 bits or 32 bits.
@@ -4304,8 +4156,28 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const dst_mcv = MCValue{ .register = dst_reg };
+ const rhs_ty = self.air.typeOf(bin_op.rhs);
// This instruction supports only signed 32-bit immediates at most.
- const src_mcv = try self.limitImmediateType(bin_op.rhs, i32);
+ const src_mcv: MCValue = blk: {
+ switch (rhs_ty.zigTypeTag()) {
+ .Float => {
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const rhs_lock: ?RegisterLock = switch (rhs) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+ const src_reg = try self.copyToTmpRegister(rhs_ty, rhs);
+ break :blk MCValue{ .register = src_reg };
+ },
+ else => break :blk try self.limitImmediateType(bin_op.rhs, i32),
+ }
+ };
+ const src_lock: ?RegisterLock = switch (src_mcv) {
+ .register => |reg| self.register_manager.lockReg(reg),
+ else => null,
+ };
+ defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
try self.genBinOpMir(.cmp, ty, dst_mcv, src_mcv);
break :result switch (signedness) {
@@ -4504,9 +4376,7 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
Mir.Inst.Tag.cond_jmp_greater_less;
return self.addInst(.{
.tag = tag,
- .ops = (Mir.Ops(Register, Register){
- .flags = flags,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .flags = flags }),
.data = .{ .inst = undefined },
});
},
@@ -4514,17 +4384,12 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
try self.spillCompareFlagsIfOccupied();
_ = try self.addInst(.{
.tag = .@"test",
- .ops = (Mir.Ops(Register, Register){
- .reg1 = reg,
- .flags = 0b00,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
.data = .{ .imm = 1 },
});
return self.addInst(.{
.tag = .cond_jmp_eq_ne,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b01,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .inst = undefined },
});
},
@@ -4918,9 +4783,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
try self.genBody(body);
_ = try self.addInst(.{
.tag = .jmp,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b00,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst = jmp_target },
});
return self.finishAirBookkeeping();
@@ -4971,19 +4834,17 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
.immediate => |imm| {
_ = try self.addInst(.{
.tag = .xor,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = registerAlias(cond_reg, abi_size),
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(cond_reg, abi_size) }),
.data = .{ .imm = @intCast(u32, imm) },
});
},
.register => |reg| {
_ = try self.addInst(.{
.tag = .xor,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(cond_reg, abi_size),
.reg2 = registerAlias(reg, abi_size),
- }).encode(),
+ }),
.data = undefined,
});
},
@@ -5002,17 +4863,15 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
_ = try self.addInst(.{
.tag = .@"test",
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(cond_reg, abi_size),
.reg2 = registerAlias(cond_reg, abi_size),
- }).encode(),
+ }),
.data = undefined,
});
return self.addInst(.{
.tag = .cond_jmp_eq_ne,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b00,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst = undefined },
});
},
@@ -5156,17 +5015,20 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
if (block_mcv == .none) {
block_data.mcv = switch (operand_mcv) {
.none, .dead, .unreach => unreachable,
- .register, .stack_offset, .memory => operand_mcv,
+ .stack_offset, .memory => operand_mcv,
.compare_flags_signed, .compare_flags_unsigned, .immediate => blk: {
const new_mcv = try self.allocRegOrMem(block, true);
try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
break :blk new_mcv;
},
- .avx_register => blk: {
- // TODO not needed; return operand_mcv ones we can transfer between XMM registers
- const new_mcv = try self.allocRegOrMem(block, false);
- try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
- break :blk new_mcv;
+ .register => blk: {
+ if (self.air.typeOfIndex(block).zigTypeTag() == .Float) {
+ // TODO not needed; return operand_mcv once we can transfer between XMM registers
+ const new_mcv = try self.allocRegOrMem(block, false);
+ try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
+ break :blk new_mcv;
+ }
+ break :blk operand_mcv;
},
else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
};
@@ -5184,9 +5046,7 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
// Leave the jump offset undefined
const jmp_reloc = try self.addInst(.{
.tag = .jmp,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b00,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst = undefined },
});
block_data.relocs.appendAssumeCapacity(jmp_reloc);
@@ -5274,9 +5134,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
};
_ = try self.addInst(.{
.tag = .push,
- .ops = (Mir.Ops(Register, Register){
- .flags = 0b10,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .flags = 0b10 }),
.data = .{ .imm = n },
});
} else if (mem.indexOf(u8, arg, "%%")) |l| {
@@ -5285,9 +5143,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("unrecognized register: '{s}'", .{reg_name});
_ = try self.addInst(.{
.tag = .push,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = reg,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
.data = undefined,
});
} else return self.fail("TODO more push operands", .{});
@@ -5299,9 +5155,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("unrecognized register: '{s}'", .{reg_name});
_ = try self.addInst(.{
.tag = .pop,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = reg,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
.data = undefined,
});
} else return self.fail("TODO more pop operands", .{});
@@ -5365,7 +5219,6 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
.none => return,
.immediate => unreachable,
.register => |reg| return self.genSetReg(ty, reg, val),
- .avx_register => |reg| return self.genSetAvxReg(ty, reg, val),
.stack_offset => |off| return self.genSetStack(ty, off, val, .{}),
.memory => {
return self.fail("TODO implement setRegOrMem for memory", .{});
@@ -5396,9 +5249,6 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
.register_overflow_unsigned,
.register_overflow_signed,
=> return self.fail("TODO genSetStackArg for register with overflow bit", .{}),
- .avx_register => {
- return self.fail("TODO genSetStackArg for AVX register", .{});
- },
.compare_flags_unsigned,
.compare_flags_signed,
=> {
@@ -5417,7 +5267,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
});
_ = try self.addInst(.{
.tag = .mov_mem_imm,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rsp,
.flags = switch (abi_size) {
1 => 0b00,
@@ -5425,7 +5275,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
4 => 0b10,
else => unreachable,
},
- }).encode(),
+ }),
.data = .{ .payload = payload },
});
},
@@ -5453,11 +5303,11 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
.register => |reg| {
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = .rsp,
.reg2 = registerAlias(reg, @intCast(u32, abi_size)),
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -stack_offset) },
});
},
@@ -5520,10 +5370,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
};
_ = try self.addInst(.{
.tag = .cond_set_byte_overflow,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = tmp_reg.to8(),
.flags = flags,
- }).encode(),
+ }),
.data = undefined,
});
@@ -5550,7 +5400,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
});
_ = try self.addInst(.{
.tag = .mov_mem_imm,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = base_reg,
.flags = switch (abi_size) {
1 => 0b00,
@@ -5558,7 +5408,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
4 => 0b10,
else => unreachable,
},
- }).encode(),
+ }),
.data = .{ .payload = payload },
});
},
@@ -5572,10 +5422,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
});
_ = try self.addInst(.{
.tag = .mov_mem_imm,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = base_reg,
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .payload = payload },
});
}
@@ -5586,10 +5436,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
});
_ = try self.addInst(.{
.tag = .mov_mem_imm,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = base_reg,
.flags = 0b10,
- }).encode(),
+ }),
.data = .{ .payload = payload },
});
}
@@ -5605,65 +5455,64 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
}
const base_reg = opts.dest_stack_base orelse .rbp;
- if (!math.isPowerOfTwo(abi_size)) {
- const reg_lock = self.register_manager.lockReg(reg);
- defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);
-
- const tmp_reg = try self.copyToTmpRegister(ty, mcv);
-
- var next_offset = stack_offset;
- var remainder = abi_size;
- while (remainder > 0) {
- const nearest_power_of_two = @as(u6, 1) << math.log2_int(u3, @intCast(u3, remainder));
-
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = base_reg,
- .reg2 = registerAlias(tmp_reg, nearest_power_of_two),
- .flags = 0b10,
- }).encode(),
- .data = .{ .imm = @bitCast(u32, -next_offset) },
- });
- if (nearest_power_of_two > 1) {
- try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{ .immediate = nearest_power_of_two * 8 });
- }
-
- remainder -= nearest_power_of_two;
- next_offset -= nearest_power_of_two;
- }
- } else {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = base_reg,
- .reg2 = registerAlias(reg, @intCast(u32, abi_size)),
- .flags = 0b10,
- }).encode(),
- .data = .{ .imm = @bitCast(u32, -stack_offset) },
- });
- }
- },
- .avx_register => |reg| {
- const base_reg = opts.dest_stack_base orelse .rbp;
switch (ty.zigTypeTag()) {
.Float => switch (ty.tag()) {
- .f32 => return self.fail("TODO genSetStack for AVX register for f32", .{}),
+ .f32 => return self.fail("TODO genSetStack for register for f32", .{}),
.f64 => {
_ = try self.addInst(.{
.tag = .mov_f64,
- .ops = (Mir.Ops(Register, AvxRegister){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = base_reg,
.reg2 = reg.to128(),
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -stack_offset) },
});
},
- else => return self.fail("TODO genSetStack for AVX register for type {}", .{ty.fmtDebug()}),
+ else => return self.fail("TODO genSetStack for register for type {}", .{ty.fmtDebug()}),
+ },
+ else => {
+ if (!math.isPowerOfTwo(abi_size)) {
+ const reg_lock = self.register_manager.lockReg(reg);
+ defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const tmp_reg = try self.copyToTmpRegister(ty, mcv);
+
+ var next_offset = stack_offset;
+ var remainder = abi_size;
+ while (remainder > 0) {
+ const nearest_power_of_two = @as(u6, 1) << math.log2_int(u3, @intCast(u3, remainder));
+
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = base_reg,
+ .reg2 = registerAlias(tmp_reg, nearest_power_of_two),
+ .flags = 0b10,
+ }),
+ .data = .{ .imm = @bitCast(u32, -next_offset) },
+ });
+
+ if (nearest_power_of_two > 1) {
+ try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{ .immediate = nearest_power_of_two * 8 });
+ }
+
+ remainder -= nearest_power_of_two;
+ next_offset -= nearest_power_of_two;
+ }
+ } else {
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = base_reg,
+ .reg2 = registerAlias(reg, @intCast(u32, abi_size)),
+ .flags = 0b10,
+ }),
+ .data = .{ .imm = @bitCast(u32, -stack_offset) },
+ });
+ }
},
- else => return self.fail("TODO genSetStack for AVX register for type {}", .{ty.fmtDebug()}),
}
},
.memory,
@@ -5742,20 +5591,20 @@ fn genInlineMemcpy(
.ptr_stack_offset, .stack_offset => |off| {
_ = try self.addInst(.{
.tag = .lea,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = dst_addr_reg.to64(),
.reg2 = opts.dest_stack_base orelse .rbp,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
.register => |reg| {
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(dst_addr_reg, @divExact(reg.size(), 8)),
.reg2 = reg,
- }).encode(),
+ }),
.data = undefined,
});
},
@@ -5777,20 +5626,20 @@ fn genInlineMemcpy(
.ptr_stack_offset, .stack_offset => |off| {
_ = try self.addInst(.{
.tag = .lea,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = src_addr_reg.to64(),
.reg2 = opts.source_stack_base orelse .rbp,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
.register => |reg| {
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(src_addr_reg, @divExact(reg.size(), 8)),
.reg2 = reg,
- }).encode(),
+ }),
.data = undefined,
});
},
@@ -5810,18 +5659,14 @@ fn genInlineMemcpy(
// mov rcx, 0
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rcx,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rcx }),
.data = .{ .imm = 0 },
});
// mov rax, 0
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rax,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax }),
.data = .{ .imm = 0 },
});
@@ -5829,70 +5674,62 @@ fn genInlineMemcpy(
// cmp count, 0
const loop_start = try self.addInst(.{
.tag = .cmp,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = count_reg,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
.data = .{ .imm = 0 },
});
// je end
const loop_reloc = try self.addInst(.{
.tag = .cond_jmp_eq_ne,
- .ops = (Mir.Ops(Register, Register){ .flags = 0b01 }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .inst = undefined },
});
// mov tmp, [addr + rcx]
_ = try self.addInst(.{
.tag = .mov_scale_src,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = tmp_reg.to8(),
.reg2 = src_addr_reg,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
// mov [stack_offset + rax], tmp
_ = try self.addInst(.{
.tag = .mov_scale_dst,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = dst_addr_reg,
.reg2 = tmp_reg.to8(),
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
// add rcx, 1
_ = try self.addInst(.{
.tag = .add,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rcx,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rcx }),
.data = .{ .imm = 1 },
});
// add rax, 1
_ = try self.addInst(.{
.tag = .add,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rax,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax }),
.data = .{ .imm = 1 },
});
// sub count, 1
_ = try self.addInst(.{
.tag = .sub,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = count_reg,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
.data = .{ .imm = 1 },
});
// jmp loop
_ = try self.addInst(.{
.tag = .jmp,
- .ops = (Mir.Ops(Register, Register){ .flags = 0b00 }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst = loop_start },
});
@@ -5924,20 +5761,20 @@ fn genInlineMemset(
.ptr_stack_offset, .stack_offset => |off| {
_ = try self.addInst(.{
.tag = .lea,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = addr_reg.to64(),
.reg2 = opts.dest_stack_base orelse .rbp,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
.register => |reg| {
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(addr_reg, @divExact(reg.size(), 8)),
.reg2 = reg,
- }).encode(),
+ }),
.data = undefined,
});
},
@@ -5955,16 +5792,14 @@ fn genInlineMemset(
// cmp rax, -1
const loop_start = try self.addInst(.{
.tag = .cmp,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rax,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax }),
.data = .{ .imm = @bitCast(u32, @as(i32, -1)) },
});
// je end
const loop_reloc = try self.addInst(.{
.tag = .cond_jmp_eq_ne,
- .ops = (Mir.Ops(Register, Register){ .flags = 0b01 }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .inst = undefined },
});
@@ -5980,9 +5815,7 @@ fn genInlineMemset(
});
_ = try self.addInst(.{
.tag = .mov_mem_index_imm,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = addr_reg,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = addr_reg }),
.data = .{ .payload = payload },
});
},
@@ -5992,16 +5825,14 @@ fn genInlineMemset(
// sub rax, 1
_ = try self.addInst(.{
.tag = .sub,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rax,
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax }),
.data = .{ .imm = 1 },
});
// jmp loop
_ = try self.addInst(.{
.tag = .jmp,
- .ops = (Mir.Ops(Register, Register){ .flags = 0b00 }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst = loop_start },
});
@@ -6009,88 +5840,6 @@ fn genInlineMemset(
try self.performReloc(loop_reloc);
}
-fn genSetAvxReg(self: *Self, ty: Type, reg: AvxRegister, mcv: MCValue) InnerError!void {
- switch (mcv) {
- .dead => unreachable,
- .register_overflow_unsigned,
- .register_overflow_signed,
- => unreachable,
- .stack_offset => |off| {
- if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) {
- return self.fail("stack offset too large", .{});
- }
-
- switch (ty.zigTypeTag()) {
- .Float => {
- switch (ty.tag()) {
- .f32 => return self.fail("TODO genSetAvxReg from stack offset for f32", .{}),
- .f64 => {
- _ = try self.addInst(.{
- .tag = .mov_f64,
- .ops = (Mir.Ops(AvxRegister, Register){
- .reg1 = reg.to128(),
- .reg2 = .rbp,
- }).encode(),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
- },
- else => return self.fail("TODO genSetAvxReg from stack offset for {}", .{ty.fmtDebug()}),
- }
- },
- else => return self.fail("TODO genSetAvxReg from stack offset for type {}", .{ty.fmtDebug()}),
- }
- },
- .avx_register => |src_reg| {
- switch (ty.zigTypeTag()) {
- .Float => {
- switch (ty.tag()) {
- .f32 => return self.fail("TODO genSetAvxReg from register for f32", .{}),
- .f64 => {
- _ = try self.addInst(.{
- .tag = .mov_f64,
- .ops = (Mir.Ops(AvxRegister, AvxRegister){
- .reg1 = reg.to128(),
- .reg2 = src_reg.to128(),
- .flags = 0b10,
- }).encode(),
- .data = undefined,
- });
- },
- else => return self.fail("TODO genSetAvxReg from register for {}", .{ty.fmtDebug()}),
- }
- },
- else => return self.fail("TODO genSetAvxReg from register for type {}", .{ty.fmtDebug()}),
- }
- },
- .memory => {
- switch (ty.zigTypeTag()) {
- .Float => {
- switch (ty.tag()) {
- .f32 => return self.fail("TODO genSetAvxReg from memory for f32", .{}),
- .f64 => {
- const base_reg = try self.register_manager.allocReg(null);
- try self.loadMemPtrIntoRegister(base_reg, Type.usize, mcv);
- _ = try self.addInst(.{
- .tag = .mov_f64,
- .ops = (Mir.Ops(AvxRegister, Register){
- .reg1 = reg.to128(),
- .reg2 = base_reg.to64(),
- }).encode(),
- .data = .{ .imm = 0 },
- });
- },
- else => return self.fail("TODO genSetAvxReg from memory for {}", .{ty.fmtDebug()}),
- }
- },
- else => return self.fail("TODO genSetAvxReg from memory for type {}", .{ty.fmtDebug()}),
- }
- },
- else => |other| {
- return self.fail("TODO genSetAvxReg from {}", .{other});
- },
- }
-}
-
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (mcv) {
@@ -6098,17 +5847,16 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.register_overflow_unsigned,
.register_overflow_signed,
=> unreachable,
- .avx_register => unreachable,
.ptr_stack_offset => |off| {
if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
_ = try self.addInst(.{
.tag = .lea,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(reg, abi_size),
.reg2 = .rbp,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
@@ -6145,10 +5893,10 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
};
_ = try self.addInst(.{
.tag = tag,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = reg.to8(),
.flags = flags,
- }).encode(),
+ }),
.data = undefined,
});
},
@@ -6158,10 +5906,10 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (x == 0) {
_ = try self.addInst(.{
.tag = .xor,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = reg.to32(),
.reg2 = reg.to32(),
- }).encode(),
+ }),
.data = undefined,
});
return;
@@ -6170,9 +5918,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// Next best case: if we set the lower four bytes, the upper four will be zeroed.
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = registerAlias(reg, abi_size),
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }),
.data = .{ .imm = @truncate(u32, x) },
});
return;
@@ -6187,9 +5933,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
const payload = try self.addExtra(Mir.Imm64.encode(x));
_ = try self.addInst(.{
.tag = .movabs,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = reg.to64(),
- }).encode(),
+ .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to64() }),
.data = .{ .payload = payload },
});
},
@@ -6198,40 +5942,60 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (src_reg.id() == reg.id())
return;
- if (ty.zigTypeTag() == .Int) blk: {
- switch (ty.intInfo(self.target.*).signedness) {
+ switch (ty.zigTypeTag()) {
+ .Int => switch (ty.intInfo(self.target.*).signedness) {
.signed => {
- if (abi_size > 4) break :blk;
- _ = try self.addInst(.{
- .tag = .mov_sign_extend,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = reg.to64(),
- .reg2 = registerAlias(src_reg, abi_size),
- }).encode(),
- .data = undefined,
- });
+ if (abi_size <= 4) {
+ _ = try self.addInst(.{
+ .tag = .mov_sign_extend,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = reg.to64(),
+ .reg2 = registerAlias(src_reg, abi_size),
+ }),
+ .data = undefined,
+ });
+ return;
+ }
},
.unsigned => {
- if (abi_size > 2) break :blk;
+ if (abi_size <= 2) {
+ _ = try self.addInst(.{
+ .tag = .mov_zero_extend,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = reg.to64(),
+ .reg2 = registerAlias(src_reg, abi_size),
+ }),
+ .data = undefined,
+ });
+ return;
+ }
+ },
+ },
+ .Float => switch (ty.tag()) {
+ .f32 => return self.fail("TODO genSetReg from register for f32", .{}),
+ .f64 => {
_ = try self.addInst(.{
- .tag = .mov_zero_extend,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = reg.to64(),
- .reg2 = registerAlias(src_reg, abi_size),
- }).encode(),
+ .tag = .mov_f64,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = reg.to128(),
+ .reg2 = src_reg.to128(),
+ .flags = 0b10,
+ }),
.data = undefined,
});
+ return;
},
- }
- return;
+ else => return self.fail("TODO genSetReg from register for {}", .{ty.fmtDebug()}),
+ },
+ else => {},
}
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(reg, abi_size),
.reg2 = registerAlias(src_reg, abi_size),
- }).encode(),
+ }),
.data = undefined,
});
},
@@ -6241,107 +6005,148 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
try self.loadMemPtrIntoRegister(reg, Type.usize, mcv);
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(reg, abi_size),
.reg2 = reg.to64(),
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
},
- .memory => |x| {
- if (x <= math.maxInt(i32)) {
- // mov reg, [ds:imm32]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = registerAlias(reg, abi_size),
- .flags = 0b01,
- }).encode(),
- .data = .{ .imm = @truncate(u32, x) },
- });
- } else {
- // If this is RAX, we can use a direct load.
- // Otherwise, we need to load the address, then indirectly load the value.
- if (reg.id() == 0) {
- // movabs rax, ds:moffs64
- const payload = try self.addExtra(Mir.Imm64.encode(x));
- _ = try self.addInst(.{
- .tag = .movabs,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = .rax,
- .flags = 0b01, // imm64 will become moffs64
- }).encode(),
- .data = .{ .payload = payload },
- });
- } else {
- // Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue.
- try self.genSetReg(ty, reg, MCValue{ .immediate = x });
-
- // mov reg, [reg + 0x0]
+ .memory => |x| switch (ty.zigTypeTag()) {
+ .Float => {
+ switch (ty.tag()) {
+ .f32 => return self.fail("TODO genSetReg from memory for f32", .{}),
+ .f64 => {
+ const base_reg = try self.register_manager.allocReg(null);
+ try self.loadMemPtrIntoRegister(base_reg, Type.usize, mcv);
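+                            // flags defaults to 0b00 here, i.e. the xmm <- m64
+                            // form: `vmovsd xmm, qword ptr [base_reg + 0]`.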
+ _ = try self.addInst(.{
+ .tag = .mov_f64,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = reg.to128(),
+ .reg2 = base_reg.to64(),
+ }),
+ .data = .{ .imm = 0 },
+ });
+ },
+ else => return self.fail("TODO genSetReg from memory for {}", .{ty.fmtDebug()}),
+ }
+ },
+ else => {
+ if (x <= math.maxInt(i32)) {
+ // mov reg, [ds:imm32]
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(reg, abi_size),
- .reg2 = reg.to64(),
.flags = 0b01,
- }).encode(),
- .data = .{ .imm = 0 },
+ }),
+ .data = .{ .imm = @truncate(u32, x) },
});
+ } else {
+ // If this is RAX, we can use a direct load.
+ // Otherwise, we need to load the address, then indirectly load the value.
+ if (reg.id() == 0) {
+ // movabs rax, ds:moffs64
+ const payload = try self.addExtra(Mir.Imm64.encode(x));
+ _ = try self.addInst(.{
+ .tag = .movabs,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = .rax,
+ .flags = 0b01, // imm64 will become moffs64
+ }),
+ .data = .{ .payload = payload },
+ });
+ } else {
+ // Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue.
+ try self.genSetReg(ty, reg, MCValue{ .immediate = x });
+
+ // mov reg, [reg + 0x0]
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = registerAlias(reg, abi_size),
+ .reg2 = reg.to64(),
+ .flags = 0b01,
+ }),
+ .data = .{ .imm = 0 },
+ });
+ }
}
- }
+ },
},
.stack_offset => |off| {
if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
- if (ty.zigTypeTag() == .Int) blk: {
- switch (ty.intInfo(self.target.*).signedness) {
+ switch (ty.zigTypeTag()) {
+ .Int => switch (ty.intInfo(self.target.*).signedness) {
.signed => {
- const flags: u2 = switch (abi_size) {
- 1 => 0b01,
- 2 => 0b10,
- 4 => 0b11,
- else => break :blk,
- };
- _ = try self.addInst(.{
- .tag = .mov_sign_extend,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = reg.to64(),
- .reg2 = .rbp,
- .flags = flags,
- }).encode(),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ if (abi_size <= 4) {
+ const flags: u2 = switch (abi_size) {
+ 1 => 0b01,
+ 2 => 0b10,
+ 4 => 0b11,
+ else => unreachable,
+ };
+ _ = try self.addInst(.{
+ .tag = .mov_sign_extend,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = reg.to64(),
+ .reg2 = .rbp,
+ .flags = flags,
+ }),
+ .data = .{ .imm = @bitCast(u32, -off) },
+ });
+ return;
+ }
},
.unsigned => {
- const flags: u2 = switch (abi_size) {
- 1 => 0b01,
- 2 => 0b10,
- else => break :blk,
- };
+ if (abi_size <= 2) {
+ const flags: u2 = switch (abi_size) {
+ 1 => 0b01,
+ 2 => 0b10,
+ else => unreachable,
+ };
+ _ = try self.addInst(.{
+ .tag = .mov_zero_extend,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = reg.to64(),
+ .reg2 = .rbp,
+ .flags = flags,
+ }),
+ .data = .{ .imm = @bitCast(u32, -off) },
+ });
+ return;
+ }
+ },
+ },
+ .Float => switch (ty.tag()) {
+ .f32 => return self.fail("TODO genSetReg from stack offset for f32", .{}),
+ .f64 => {
_ = try self.addInst(.{
- .tag = .mov_zero_extend,
- .ops = (Mir.Ops(Register, Register){
- .reg1 = reg.to64(),
+ .tag = .mov_f64,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = reg.to128(),
.reg2 = .rbp,
- .flags = flags,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
+ return;
},
- }
- return;
+ else => return self.fail("TODO genSetReg from stack offset for {}", .{ty.fmtDebug()}),
+ },
+ else => {},
}
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(reg, abi_size),
.reg2 = .rbp,
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -off) },
});
},
@@ -6408,14 +6213,14 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
};
_ = try self.addInst(.{
.tag = .fld,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = .rbp,
.flags = switch (src_ty.abiSize(self.target.*)) {
4 => 0b01,
8 => 0b10,
else => |size| return self.fail("TODO load ST(0) with abiSize={}", .{size}),
},
- .reg1 = .rbp,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -stack_offset) },
});
@@ -6423,15 +6228,15 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
const stack_dst = try self.allocRegOrMem(inst, false);
_ = try self.addInst(.{
.tag = .fisttp,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = .rbp,
.flags = switch (dst_ty.abiSize(self.target.*)) {
1...2 => 0b00,
3...4 => 0b01,
5...8 => 0b10,
else => |size| return self.fail("TODO convert float with abiSize={}", .{size}),
},
- .reg1 = .rbp,
- }).encode(),
+ }),
.data = .{ .imm = @bitCast(u32, -stack_dst.stack_offset) },
});
@@ -6527,11 +6332,11 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
try self.loadMemPtrIntoRegister(reg, src_ty, src_ptr);
_ = try self.addInst(.{
.tag = .mov,
- .ops = (Mir.Ops(Register, Register){
+ .ops = Mir.Inst.Ops.encode(.{
.reg1 = reg,
.reg2 = reg,
.flags = 0b01,
- }).encode(),
+ }),
.data = .{ .imm = 0 },
});
break :blk MCValue{ .register = reg };
@@ -7095,22 +6900,11 @@ fn registerAlias(reg: Register, size_bytes: u32) Register {
return reg.to32();
} else if (size_bytes <= 8) {
return reg.to64();
- } else {
- unreachable;
- }
-}
-
-/// Returns AVX register wide enough to hold at least `size_bytes`.
-fn avxRegisterAlias(reg: AvxRegister, size_bytes: u32) AvxRegister {
- if (size_bytes == 0) {
- unreachable; // should be comptime known
} else if (size_bytes <= 16) {
return reg.to128();
} else if (size_bytes <= 32) {
return reg.to256();
- } else {
- unreachable;
- }
+ } else unreachable;
}
/// Truncates the value in the register in place.
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 79341df8cd..5ad8e86374 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -25,9 +25,8 @@ const MCValue = @import("CodeGen.zig").MCValue;
const Mir = @import("Mir.zig");
const Module = @import("../../Module.zig");
const Instruction = bits.Instruction;
-const GpRegister = bits.Register;
-const AvxRegister = bits.AvxRegister;
const Type = @import("../../type.zig").Type;
+const Register = bits.Register;
mir: Mir,
bin_file: *link.File,
@@ -238,7 +237,7 @@ fn fixupRelocs(emit: *Emit) InnerError!void {
fn mirInterrupt(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .interrupt);
- const ops = Mir.Ops.decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
switch (ops.flags) {
0b00 => return lowerToZoEnc(.int3, emit.code),
else => return emit.fail("TODO handle variant 0b{b} of interrupt instruction", .{ops.flags}),
@@ -254,11 +253,11 @@ fn mirSyscall(emit: *Emit) InnerError!void {
}
fn mirPushPop(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
switch (ops.flags) {
0b00 => {
// PUSH/POP reg
- return lowerToOEnc(tag, Register.reg(ops.reg1), emit.code);
+ return lowerToOEnc(tag, ops.reg1, emit.code);
},
0b01 => {
// PUSH/POP r/m64
@@ -283,7 +282,7 @@ fn mirPushPop(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
}
fn mirPushPopRegsFromCalleePreservedRegs(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const payload = emit.mir.instructions.items(.data)[inst].payload;
const data = emit.mir.extraData(Mir.RegsToPushOrPop, payload).data;
const regs = data.regs;
@@ -294,9 +293,9 @@ fn mirPushPopRegsFromCalleePreservedRegs(emit: *Emit, tag: Tag, inst: Mir.Inst.I
try lowerToMrEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{
.disp = @bitCast(u32, -@intCast(i32, disp)),
.base = ops.reg1,
- }), Register.reg(reg.to64()), emit.code);
+ }), reg.to64(), emit.code);
} else {
- try lowerToRmEnc(.mov, Register.reg(reg.to64()), RegisterOrMemory.mem(.qword_ptr, .{
+ try lowerToRmEnc(.mov, reg.to64(), RegisterOrMemory.mem(.qword_ptr, .{
.disp = @bitCast(u32, -@intCast(i32, disp)),
.base = ops.reg1,
}), emit.code);
@@ -306,7 +305,7 @@ fn mirPushPopRegsFromCalleePreservedRegs(emit: *Emit, tag: Tag, inst: Mir.Inst.I
}
fn mirJmpCall(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
switch (ops.flags) {
0b00 => {
const target = emit.mir.instructions.items(.data)[inst].inst;
@@ -335,7 +334,7 @@ fn mirJmpCall(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
0b10 => {
// JMP/CALL r/m64
const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToMEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg1.size()), .{
+ return lowerToMEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
.disp = imm,
.base = ops.reg1,
}), emit.code);
@@ -345,7 +344,7 @@ fn mirJmpCall(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
}
fn mirCondJmp(emit: *Emit, mir_tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const target = emit.mir.instructions.items(.data)[inst].inst;
const tag = switch (mir_tag) {
.cond_jmp_greater_less => switch (ops.flags) {
@@ -377,7 +376,7 @@ fn mirCondJmp(emit: *Emit, mir_tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerErr
}
fn mirCondSetByte(emit: *Emit, mir_tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const tag = switch (mir_tag) {
.cond_set_byte_greater_less => switch (ops.flags) {
0b00 => Tag.setge,
@@ -407,9 +406,9 @@ fn mirCondSetByte(emit: *Emit, mir_tag: Mir.Inst.Tag, inst: Mir.Inst.Index) Inne
}
fn mirCondMov(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
if (ops.flags == 0b00) {
- return lowerToRmEnc(tag, Register.reg(ops.reg1), RegisterOrMemory.reg(ops.reg2), emit.code);
+ return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
}
const imm = emit.mir.instructions.items(.data)[inst].imm;
const ptr_size: Memory.PtrSize = switch (ops.flags) {
@@ -418,7 +417,7 @@ fn mirCondMov(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
0b10 => .dword_ptr,
0b11 => .qword_ptr,
};
- return lowerToRmEnc(tag, Register.reg(ops.reg1), RegisterOrMemory.mem(ptr_size, .{
+ return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(ptr_size, .{
.disp = imm,
.base = ops.reg2,
}), emit.code);
@@ -427,7 +426,7 @@ fn mirCondMov(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
fn mirTest(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .@"test");
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
switch (ops.flags) {
0b00 => {
if (ops.reg2 == .none) {
@@ -442,12 +441,7 @@ fn mirTest(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
return lowerToMiEnc(.@"test", RegisterOrMemory.reg(ops.reg1), imm, emit.code);
}
// TEST r/m64, r64
- return lowerToMrEnc(
- .@"test",
- RegisterOrMemory.reg(ops.reg1),
- Register.reg(ops.reg2),
- emit.code,
- );
+ return lowerToMrEnc(.@"test", RegisterOrMemory.reg(ops.reg1), ops.reg2, emit.code);
},
else => return emit.fail("TODO more TEST alternatives", .{}),
}
@@ -456,7 +450,7 @@ fn mirTest(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirRet(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .ret);
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
switch (ops.flags) {
0b00 => {
// RETF imm16
@@ -480,7 +474,7 @@ fn mirRet(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}
fn mirArith(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
switch (ops.flags) {
0b00 => {
if (ops.reg2 == .none) {
@@ -491,14 +485,14 @@ fn mirArith(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
}
// mov reg1, reg2
// RM
- return lowerToRmEnc(tag, Register.reg(ops.reg1), RegisterOrMemory.reg(ops.reg2), emit.code);
+ return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
},
0b01 => {
// mov reg1, [reg2 + imm32]
// RM
const imm = emit.mir.instructions.items(.data)[inst].imm;
- const src_reg: ?GpRegister = if (ops.reg2 == .none) null else ops.reg2;
- return lowerToRmEnc(tag, Register.reg(ops.reg1), RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg1.size()), .{
+ const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
+ return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
.disp = imm,
.base = src_reg,
}), emit.code);
@@ -510,10 +504,10 @@ fn mirArith(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
// mov [reg1 + imm32], reg2
// MR
const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg2.size()), .{
+ return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg2.size()), .{
.disp = imm,
.base = ops.reg1,
- }), Register.reg(ops.reg2), emit.code);
+ }), ops.reg2, emit.code);
},
0b11 => {
return emit.fail("TODO unused variant: mov reg1, reg2, 0b11", .{});
@@ -522,7 +516,7 @@ fn mirArith(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
}
fn mirArithMemImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
assert(ops.reg2 == .none);
const payload = emit.mir.instructions.items(.data)[inst].payload;
const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
@@ -539,19 +533,14 @@ fn mirArithMemImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
}
inline fn setRexWRegister(reg: Register) bool {
- switch (reg) {
- .avx_register => return false,
- .register => |r| {
- if (r.size() == 64) return true;
- return switch (r) {
- .ah, .bh, .ch, .dh => true,
- else => false,
- };
- },
- }
+ if (reg.size() == 64) return true;
+ return switch (reg) {
+ .ah, .ch, .dh, .bh => true,
+ else => false,
+ };
}
-inline fn immOpSize(u_imm: u32) u8 {
+inline fn immOpSize(u_imm: u32) u6 {
const imm = @bitCast(i32, u_imm);
if (math.minInt(i8) <= imm and imm <= math.maxInt(i8)) {
return 8;
@@ -563,7 +552,7 @@ inline fn immOpSize(u_imm: u32) u8 {
}
fn mirArithScaleSrc(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const scale = ops.flags;
const imm = emit.mir.instructions.items(.data)[inst].imm;
// OP reg1, [reg2 + scale*rcx + imm32]
@@ -571,7 +560,7 @@ fn mirArithScaleSrc(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void
.scale = scale,
.index = .rcx,
};
- return lowerToRmEnc(tag, Register.reg(ops.reg1), RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg1.size()), .{
+ return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
.disp = imm,
.base = ops.reg2,
.scale_index = scale_index,
@@ -579,7 +568,7 @@ fn mirArithScaleSrc(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void
}
fn mirArithScaleDst(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const scale = ops.flags;
const imm = emit.mir.instructions.items(.data)[inst].imm;
const scale_index = ScaleIndex{
@@ -595,15 +584,15 @@ fn mirArithScaleDst(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void
}), imm, emit.code);
}
// OP [reg1 + scale*rax + imm32], reg2
- return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg2.size()), .{
+ return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg2.size()), .{
.disp = imm,
.base = ops.reg1,
.scale_index = scale_index,
- }), Register.reg(ops.reg2), emit.code);
+ }), ops.reg2, emit.code);
}
fn mirArithScaleImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const scale = ops.flags;
const payload = emit.mir.instructions.items(.data)[inst].payload;
const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
@@ -620,7 +609,7 @@ fn mirArithScaleImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void
}
fn mirArithMemIndexImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
assert(ops.reg2 == .none);
const payload = emit.mir.instructions.items(.data)[inst].payload;
const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
@@ -645,27 +634,27 @@ fn mirArithMemIndexImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!v
fn mirMovSignExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const mir_tag = emit.mir.instructions.items(.tag)[inst];
assert(mir_tag == .mov_sign_extend);
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const imm = if (ops.flags != 0b00) emit.mir.instructions.items(.data)[inst].imm else undefined;
switch (ops.flags) {
0b00 => {
const tag: Tag = if (ops.reg2.size() == 32) .movsxd else .movsx;
- return lowerToRmEnc(tag, Register.reg(ops.reg1), RegisterOrMemory.reg(ops.reg2), emit.code);
+ return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
},
0b01 => {
- return lowerToRmEnc(.movsx, Register.reg(ops.reg1), RegisterOrMemory.mem(.byte_ptr, .{
+ return lowerToRmEnc(.movsx, ops.reg1, RegisterOrMemory.mem(.byte_ptr, .{
.disp = imm,
.base = ops.reg2,
}), emit.code);
},
0b10 => {
- return lowerToRmEnc(.movsx, Register.reg(ops.reg1), RegisterOrMemory.mem(.word_ptr, .{
+ return lowerToRmEnc(.movsx, ops.reg1, RegisterOrMemory.mem(.word_ptr, .{
.disp = imm,
.base = ops.reg2,
}), emit.code);
},
0b11 => {
- return lowerToRmEnc(.movsxd, Register.reg(ops.reg1), RegisterOrMemory.mem(.dword_ptr, .{
+ return lowerToRmEnc(.movsxd, ops.reg1, RegisterOrMemory.mem(.dword_ptr, .{
.disp = imm,
.base = ops.reg2,
}), emit.code);
@@ -676,20 +665,20 @@ fn mirMovSignExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirMovZeroExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const mir_tag = emit.mir.instructions.items(.tag)[inst];
assert(mir_tag == .mov_zero_extend);
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const imm = if (ops.flags != 0b00) emit.mir.instructions.items(.data)[inst].imm else undefined;
switch (ops.flags) {
0b00 => {
- return lowerToRmEnc(.movzx, Register.reg(ops.reg1), RegisterOrMemory.reg(ops.reg2), emit.code);
+ return lowerToRmEnc(.movzx, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
},
0b01 => {
- return lowerToRmEnc(.movzx, Register.reg(ops.reg1), RegisterOrMemory.mem(.byte_ptr, .{
+ return lowerToRmEnc(.movzx, ops.reg1, RegisterOrMemory.mem(.byte_ptr, .{
.disp = imm,
.base = ops.reg2,
}), emit.code);
},
0b10 => {
- return lowerToRmEnc(.movzx, Register.reg(ops.reg1), RegisterOrMemory.mem(.word_ptr, .{
+ return lowerToRmEnc(.movzx, ops.reg1, RegisterOrMemory.mem(.word_ptr, .{
.disp = imm,
.base = ops.reg2,
}), emit.code);
@@ -703,31 +692,46 @@ fn mirMovZeroExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirMovabs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .movabs);
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
- const imm: u64 = if (ops.reg1.size() == 64) blk: {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm = emit.mir.extraData(Mir.Imm64, payload).data;
- break :blk imm.decode();
- } else emit.mir.instructions.items(.data)[inst].imm;
- if (ops.flags == 0b00) {
- // movabs reg, imm64
- // OI
- return lowerToOiEnc(.mov, Register.reg(ops.reg1), imm, emit.code);
- }
- if (ops.reg1 == .none) {
- // movabs moffs64, rax
- // TD
- return lowerToTdEnc(.mov, imm, Register.reg(ops.reg2), emit.code);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
+ switch (ops.flags) {
+ 0b00 => {
+ const imm: u64 = if (ops.reg1.size() == 64) blk: {
+ const payload = emit.mir.instructions.items(.data)[inst].payload;
+ const imm = emit.mir.extraData(Mir.Imm64, payload).data;
+ break :blk imm.decode();
+ } else emit.mir.instructions.items(.data)[inst].imm;
+ // movabs reg, imm64
+ // OI
+ return lowerToOiEnc(.mov, ops.reg1, imm, emit.code);
+ },
+ 0b01 => {
+ if (ops.reg1 == .none) {
+ const imm: u64 = if (ops.reg2.size() == 64) blk: {
+ const payload = emit.mir.instructions.items(.data)[inst].payload;
+ const imm = emit.mir.extraData(Mir.Imm64, payload).data;
+ break :blk imm.decode();
+ } else emit.mir.instructions.items(.data)[inst].imm;
+ // movabs moffs64, rax
+ // TD
+ return lowerToTdEnc(.mov, imm, ops.reg2, emit.code);
+ }
+ const imm: u64 = if (ops.reg1.size() == 64) blk: {
+ const payload = emit.mir.instructions.items(.data)[inst].payload;
+ const imm = emit.mir.extraData(Mir.Imm64, payload).data;
+ break :blk imm.decode();
+ } else emit.mir.instructions.items(.data)[inst].imm;
+ // movabs rax, moffs64
+ // FD
+ return lowerToFdEnc(.mov, ops.reg1, imm, emit.code);
+ },
+ else => return emit.fail("TODO unused variant: movabs 0b{b}", .{ops.flags}),
}
- // movabs rax, moffs64
- // FD
- return lowerToFdEnc(.mov, Register.reg(ops.reg1), imm, emit.code);
}
fn mirFisttp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .fisttp);
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
    // Selecting between operand sizes for this particular `fisttp` instruction
    // is done via the opcode instead of the usual prefixes.
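    // (For reference, per the Intel SDM: fisttp m16int is DF /1, fisttp m32int
    // is DB /1, and fisttp m64int is DD /1.)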
@@ -749,7 +753,7 @@ fn mirFisttp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirFld(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .fld);
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
    // Selecting between operand sizes for this particular `fld` instruction
    // is done via the opcode instead of the usual prefixes.
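    // (For reference, per the Intel SDM: fld m32fp is D9 /0 and fld m64fp is DD /0.)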
@@ -768,7 +772,7 @@ fn mirFld(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}
fn mirShift(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
switch (ops.flags) {
0b00 => {
// sal reg1, 1
@@ -793,12 +797,11 @@ fn mirShift(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
}
fn mirMulDiv(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
if (ops.reg1 != .none) {
assert(ops.reg2 == .none);
return lowerToMEnc(tag, RegisterOrMemory.reg(ops.reg1), emit.code);
}
- assert(ops.reg1 == .none);
assert(ops.reg2 != .none);
const imm = emit.mir.instructions.items(.data)[inst].imm;
const ptr_size: Memory.PtrSize = switch (ops.flags) {
@@ -816,27 +819,27 @@ fn mirMulDiv(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
fn mirIMulComplex(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .imul_complex);
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
switch (ops.flags) {
0b00 => {
- return lowerToRmEnc(.imul, Register.reg(ops.reg1), RegisterOrMemory.reg(ops.reg2), emit.code);
+ return lowerToRmEnc(.imul, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
},
0b01 => {
const imm = emit.mir.instructions.items(.data)[inst].imm;
- const src_reg: ?GpRegister = if (ops.reg2 == .none) null else ops.reg2;
- return lowerToRmEnc(.imul, Register.reg(ops.reg1), RegisterOrMemory.mem(.qword_ptr, .{
+ const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
+ return lowerToRmEnc(.imul, ops.reg1, RegisterOrMemory.mem(.qword_ptr, .{
.disp = imm,
.base = src_reg,
}), emit.code);
},
0b10 => {
const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToRmiEnc(.imul, Register.reg(ops.reg1), RegisterOrMemory.reg(ops.reg2), imm, emit.code);
+ return lowerToRmiEnc(.imul, ops.reg1, RegisterOrMemory.reg(ops.reg2), imm, emit.code);
},
0b11 => {
const payload = emit.mir.instructions.items(.data)[inst].payload;
const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
- return lowerToRmiEnc(.imul, Register.reg(ops.reg1), RegisterOrMemory.mem(.qword_ptr, .{
+ return lowerToRmiEnc(.imul, ops.reg1, RegisterOrMemory.mem(.qword_ptr, .{
.disp = imm_pair.dest_off,
.base = ops.reg2,
}), imm_pair.operand, emit.code);
@@ -845,7 +848,7 @@ fn mirIMulComplex(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}
fn mirCwd(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const tag: Tag = switch (ops.flags) {
0b00 => .cbw,
0b01 => .cwd,
@@ -858,17 +861,17 @@ fn mirCwd(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .lea);
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
switch (ops.flags) {
0b00 => {
// lea reg1, [reg2 + imm32]
// RM
const imm = emit.mir.instructions.items(.data)[inst].imm;
- const src_reg: ?GpRegister = if (ops.reg2 == .none) null else ops.reg2;
+ const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
return lowerToRmEnc(
.lea,
- Register.reg(ops.reg1),
- RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg1.size()), .{
+ ops.reg1,
+ RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
.disp = imm,
.base = src_reg,
}),
@@ -881,8 +884,8 @@ fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const start_offset = emit.code.items.len;
try lowerToRmEnc(
.lea,
- Register.reg(ops.reg1),
- RegisterOrMemory.rip(Memory.PtrSize.fromBits(ops.reg1.size()), 0),
+ ops.reg1,
+ RegisterOrMemory.rip(Memory.PtrSize.new(ops.reg1.size()), 0),
emit.code,
);
const end_offset = emit.code.items.len;
@@ -895,15 +898,15 @@ fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
0b10 => {
// lea reg, [rbp + rcx + imm32]
const imm = emit.mir.instructions.items(.data)[inst].imm;
- const src_reg: ?GpRegister = if (ops.reg2 == .none) null else ops.reg2;
+ const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
const scale_index = ScaleIndex{
.scale = 0,
.index = .rcx,
};
return lowerToRmEnc(
.lea,
- Register.reg(ops.reg1),
- RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg1.size()), .{
+ ops.reg1,
+ RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
.disp = imm,
.base = src_reg,
.scale_index = scale_index,
@@ -918,15 +921,15 @@ fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .lea_pie);
- const ops = Mir.Ops(GpRegister, GpRegister).decode(emit.mir.instructions.items(.ops)[inst]);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
const load_reloc = emit.mir.instructions.items(.data)[inst].load_reloc;
// lea reg1, [rip + reloc]
// RM
try lowerToRmEnc(
.lea,
- Register.reg(ops.reg1),
- RegisterOrMemory.rip(Memory.PtrSize.fromBits(ops.reg1.size()), 0),
+ ops.reg1,
+ RegisterOrMemory.rip(Memory.PtrSize.new(ops.reg1.size()), 0),
emit.code,
);
@@ -962,78 +965,70 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirMovF64(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .mov_f64);
- const ops = emit.mir.instructions.items(.ops)[inst];
- const flags = @truncate(u2, ops);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (flags) {
+ switch (ops.flags) {
0b00 => {
- const decoded = Mir.Ops(AvxRegister, GpRegister).decode(ops);
const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToRmEnc(.vmovsd, Register.avxReg(decoded.reg1), RegisterOrMemory.mem(.qword_ptr, .{
+ return lowerToVmEnc(.vmovsd, ops.reg1, RegisterOrMemory.mem(.qword_ptr, .{
.disp = imm,
- .base = decoded.reg2,
+ .base = ops.reg2,
}), emit.code);
},
0b01 => {
- const decoded = Mir.Ops(GpRegister, AvxRegister).decode(ops);
const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToMrEnc(.vmovsd, RegisterOrMemory.mem(.qword_ptr, .{
+ return lowerToMvEnc(.vmovsd, RegisterOrMemory.mem(.qword_ptr, .{
.disp = imm,
- .base = decoded.reg1,
- }), Register.avxReg(decoded.reg2), emit.code);
+ .base = ops.reg1,
+ }), ops.reg2, emit.code);
},
0b10 => {
- const decoded = Mir.Ops(AvxRegister, AvxRegister).decode(ops);
return lowerToRvmEnc(
.vmovsd,
- Register.avxReg(decoded.reg1),
- Register.avxReg(decoded.reg1),
- RegisterOrMemory.avxReg(decoded.reg2),
+ ops.reg1,
+ ops.reg1,
+ RegisterOrMemory.reg(ops.reg2),
emit.code,
);
},
- else => return emit.fail("TODO unused variant 0b{b} for mov_f64", .{flags}),
+ else => return emit.fail("TODO unused variant 0b{b} for mov_f64", .{ops.flags}),
}
}
fn mirAddF64(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .add_f64);
- const ops = emit.mir.instructions.items(.ops)[inst];
- const flags = @truncate(u2, ops);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (flags) {
+ switch (ops.flags) {
0b00 => {
- const decoded = Mir.Ops(AvxRegister, AvxRegister).decode(ops);
return lowerToRvmEnc(
.vaddsd,
- Register.avxReg(decoded.reg1),
- Register.avxReg(decoded.reg1),
- RegisterOrMemory.avxReg(decoded.reg2),
+ ops.reg1,
+ ops.reg1,
+ RegisterOrMemory.reg(ops.reg2),
emit.code,
);
},
- else => return emit.fail("TODO unused variant 0b{b} for mov_f64", .{flags}),
+        else => return emit.fail("TODO unused variant 0b{b} for add_f64", .{ops.flags}),
}
}
fn mirCmpF64(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .cmp_f64);
- const ops = emit.mir.instructions.items(.ops)[inst];
- const flags = @truncate(u2, ops);
+ const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (flags) {
+ switch (ops.flags) {
0b00 => {
- const decoded = Mir.Ops(AvxRegister, AvxRegister).decode(ops);
- return lowerToRmEnc(
+ return lowerToVmEnc(
.vucomisd,
- Register.avxReg(decoded.reg1),
- RegisterOrMemory.avxReg(decoded.reg2),
+ ops.reg1,
+ RegisterOrMemory.reg(ops.reg2),
emit.code,
);
},
- else => return emit.fail("TODO unused variant 0b{b} for mov_f64", .{flags}),
+        else => return emit.fail("TODO unused variant 0b{b} for cmp_f64", .{ops.flags}),
}
}
@@ -1277,6 +1272,18 @@ const Tag = enum {
vcmpsd,
vucomisd,
+ fn isAvx(tag: Tag) bool {
+ return switch (tag) {
+ .vmovsd,
+ .vaddsd,
+ .vcmpsd,
+ .vucomisd,
+ => true,
+
+ else => false,
+ };
+ }
+
fn isSetCC(tag: Tag) bool {
return switch (tag) {
.seto,
@@ -1361,6 +1368,12 @@ const Encoding = enum {
/// OP r64, r/m64, imm32
rmi,
+ /// OP xmm1, xmm2/m64
+ vm,
+
+ /// OP m64, xmm1
+ mv,
+
/// OP xmm1, xmm2, xmm3/m64
rvm,
@@ -1389,7 +1402,7 @@ const OpCode = union(enum) {
fn encodeWithReg(opc: OpCode, encoder: Encoder, reg: Register) void {
assert(opc == .one_byte);
- encoder.opcode_withReg(opc.one_byte, reg.lowId());
+ encoder.opcode_withReg(opc.one_byte, reg.lowEnc());
}
};
@@ -1536,6 +1549,15 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) ?OpCode {
.imul => OpCode.oneByte(if (is_one_byte) 0x6b else 0x69),
else => null,
},
+ .mv => return switch (tag) {
+ .vmovsd => OpCode.oneByte(0x11),
+ else => null,
+ },
+ .vm => return switch (tag) {
+ .vmovsd => OpCode.oneByte(0x10),
+ .vucomisd => OpCode.oneByte(0x2e),
+ else => null,
+ },
.rvm => return switch (tag) {
.vaddsd => OpCode.oneByte(0x58),
.vmovsd => OpCode.oneByte(0x10),
@@ -1647,11 +1669,11 @@ inline fn getVexPrefix(tag: Tag, enc: Encoding) ?VexPrefix {
} = .none,
} = blk: {
switch (enc) {
- .mr => switch (tag) {
+ .mv => switch (tag) {
.vmovsd => break :blk .{ .lig = true, .simd_prefix = .p_f2, .wig = true },
else => return null,
},
- .rm => switch (tag) {
+ .vm => switch (tag) {
.vmovsd => break :blk .{ .lig = true, .simd_prefix = .p_f2, .wig = true },
.vucomisd => break :blk .{ .lig = true, .simd_prefix = .p_66, .wig = true },
else => return null,
@@ -1697,61 +1719,50 @@ inline fn getVexPrefix(tag: Tag, enc: Encoding) ?VexPrefix {
} };
}
-const ScaleIndex = struct {
+const ScaleIndex = packed struct {
scale: u2,
- index: GpRegister,
+ index: Register,
};
const Memory = struct {
- base: ?GpRegister,
+ base: ?Register,
rip: bool = false,
disp: u32,
ptr_size: PtrSize,
scale_index: ?ScaleIndex = null,
- const PtrSize = enum {
- byte_ptr,
- word_ptr,
- dword_ptr,
- qword_ptr,
+ const PtrSize = enum(u2) {
+ byte_ptr = 0b00,
+ word_ptr = 0b01,
+ dword_ptr = 0b10,
+ qword_ptr = 0b11,
- fn fromBits(in_bits: u64) PtrSize {
- return switch (in_bits) {
- 8 => .byte_ptr,
- 16 => .word_ptr,
- 32 => .dword_ptr,
- 64 => .qword_ptr,
- else => unreachable,
- };
+ fn new(bit_size: u64) PtrSize {
+ return @intToEnum(PtrSize, math.log2_int(u4, @intCast(u4, @divExact(bit_size, 8))));
}
/// Returns size in bits.
fn size(ptr_size: PtrSize) u64 {
- return switch (ptr_size) {
- .byte_ptr => 8,
- .word_ptr => 16,
- .dword_ptr => 32,
- .qword_ptr => 64,
- };
+ return 8 * (math.powi(u8, 2, @enumToInt(ptr_size)) catch unreachable);
}
};
fn encode(mem_op: Memory, encoder: Encoder, operand: u3) void {
if (mem_op.base) |base| {
- const dst = base.lowId();
+ const dst = base.lowEnc();
const src = operand;
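            // Base encodings of 4 (rsp/r12) can only be addressed via a SIB byte,
            // and a SIB base of 5 (rbp/r13) with mod == 0b00 means disp32 with no
            // base, hence the special-casing below.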
if (dst == 4 or mem_op.scale_index != null) {
if (mem_op.disp == 0 and dst != 5) {
encoder.modRm_SIBDisp0(src);
if (mem_op.scale_index) |si| {
- encoder.sib_scaleIndexBase(si.scale, si.index.lowId(), dst);
+ encoder.sib_scaleIndexBase(si.scale, si.index.lowEnc(), dst);
} else {
encoder.sib_base(dst);
}
} else if (immOpSize(mem_op.disp) == 8) {
encoder.modRm_SIBDisp8(src);
if (mem_op.scale_index) |si| {
- encoder.sib_scaleIndexBaseDisp8(si.scale, si.index.lowId(), dst);
+ encoder.sib_scaleIndexBaseDisp8(si.scale, si.index.lowEnc(), dst);
} else {
encoder.sib_baseDisp8(dst);
}
@@ -1759,7 +1770,7 @@ const Memory = struct {
} else {
encoder.modRm_SIBDisp32(src);
if (mem_op.scale_index) |si| {
- encoder.sib_scaleIndexBaseDisp32(si.scale, si.index.lowId(), dst);
+ encoder.sib_scaleIndexBaseDisp32(si.scale, si.index.lowEnc(), dst);
} else {
encoder.sib_baseDisp32(dst);
}
@@ -1782,7 +1793,7 @@ const Memory = struct {
} else {
encoder.modRm_SIBDisp0(operand);
if (mem_op.scale_index) |si| {
- encoder.sib_scaleIndexDisp32(si.scale, si.index.lowId());
+ encoder.sib_scaleIndexDisp32(si.scale, si.index.lowEnc());
} else {
encoder.sib_disp32();
}
@@ -1791,6 +1802,7 @@ const Memory = struct {
}
}
+ /// Returns size in bits.
fn size(memory: Memory) u64 {
return memory.ptr_size.size();
}
@@ -1805,55 +1817,17 @@ fn encodeImm(encoder: Encoder, imm: u32, size: u64) void {
}
}
-const Register = union(enum) {
- register: GpRegister,
- avx_register: AvxRegister,
-
- fn reg(register: GpRegister) Register {
- return .{ .register = register };
- }
-
- fn avxReg(register: AvxRegister) Register {
- return .{ .avx_register = register };
- }
-
- fn lowId(register: Register) u3 {
- return switch (register) {
- .register => |r| r.lowId(),
- .avx_register => |r| r.lowId(),
- };
- }
-
- fn size(register: Register) u64 {
- return switch (register) {
- .register => |r| r.size(),
- .avx_register => |r| r.size(),
- };
- }
-
- fn isExtended(register: Register) bool {
- return switch (register) {
- .register => |r| r.isExtended(),
- .avx_register => |r| r.isExtended(),
- };
- }
-};
-
const RegisterOrMemory = union(enum) {
register: Register,
memory: Memory,
- fn reg(register: GpRegister) RegisterOrMemory {
- return .{ .register = Register.reg(register) };
- }
-
- fn avxReg(register: AvxRegister) RegisterOrMemory {
- return .{ .register = Register.avxReg(register) };
+ fn reg(register: Register) RegisterOrMemory {
+ return .{ .register = register };
}
fn mem(ptr_size: Memory.PtrSize, args: struct {
disp: u32,
- base: ?GpRegister = null,
+ base: ?Register = null,
scale_index: ?ScaleIndex = null,
}) RegisterOrMemory {
return .{
@@ -1877,6 +1851,7 @@ const RegisterOrMemory = union(enum) {
};
}
+ /// Returns size in bits.
fn size(reg_or_mem: RegisterOrMemory) u64 {
return switch (reg_or_mem) {
.register => |reg| reg.size(),
@@ -1886,6 +1861,7 @@ const RegisterOrMemory = union(enum) {
};
fn lowerToZoEnc(tag: Tag, code: *std.ArrayList(u8)) InnerError!void {
+ assert(!tag.isAvx());
const opc = getOpCode(tag, .zo, false).?;
const encoder = try Encoder.init(code, 2);
switch (tag) {
@@ -1900,6 +1876,7 @@ fn lowerToZoEnc(tag: Tag, code: *std.ArrayList(u8)) InnerError!void {
}
fn lowerToIEnc(tag: Tag, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
+ assert(!tag.isAvx());
if (tag == .ret_far or tag == .ret_near) {
const encoder = try Encoder.init(code, 3);
const opc = getOpCode(tag, .i, false).?;
@@ -1917,6 +1894,7 @@ fn lowerToIEnc(tag: Tag, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
}
fn lowerToOEnc(tag: Tag, reg: Register, code: *std.ArrayList(u8)) InnerError!void {
+ assert(!tag.isAvx());
const opc = getOpCode(tag, .o, false).?;
const encoder = try Encoder.init(code, 3);
if (reg.size() == 16) {
@@ -1930,6 +1908,7 @@ fn lowerToOEnc(tag: Tag, reg: Register, code: *std.ArrayList(u8)) InnerError!voi
}
fn lowerToDEnc(tag: Tag, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
+ assert(!tag.isAvx());
const opc = getOpCode(tag, .d, false).?;
const encoder = try Encoder.init(code, 6);
opc.encode(encoder);
@@ -1937,6 +1916,7 @@ fn lowerToDEnc(tag: Tag, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
}
fn lowerToMxEnc(tag: Tag, reg_or_mem: RegisterOrMemory, enc: Encoding, code: *std.ArrayList(u8)) InnerError!void {
+ assert(!tag.isAvx());
const opc = getOpCode(tag, enc, reg_or_mem.size() == 8).?;
const modrm_ext = getModRmExt(tag).?;
switch (reg_or_mem) {
@@ -1951,7 +1931,7 @@ fn lowerToMxEnc(tag: Tag, reg_or_mem: RegisterOrMemory, enc: Encoding, code: *st
.b = reg.isExtended(),
});
opc.encode(encoder);
- encoder.modRm_direct(modrm_ext, reg.lowId());
+ encoder.modRm_direct(modrm_ext, reg.lowEnc());
},
.memory => |mem_op| {
const encoder = try Encoder.init(code, 8);
@@ -1992,6 +1972,7 @@ fn lowerToFdEnc(tag: Tag, reg: Register, moffs: u64, code: *std.ArrayList(u8)) I
}
fn lowerToTdFdEnc(tag: Tag, reg: Register, moffs: u64, code: *std.ArrayList(u8), td: bool) InnerError!void {
+ assert(!tag.isAvx());
const opc = if (td)
getOpCode(tag, .td, reg.size() == 8).?
else
@@ -2014,6 +1995,7 @@ fn lowerToTdFdEnc(tag: Tag, reg: Register, moffs: u64, code: *std.ArrayList(u8),
}
fn lowerToOiEnc(tag: Tag, reg: Register, imm: u64, code: *std.ArrayList(u8)) InnerError!void {
+ assert(!tag.isAvx());
const opc = getOpCode(tag, .oi, reg.size() == 8).?;
const encoder = try Encoder.init(code, 10);
if (reg.size() == 16) {
@@ -2040,6 +2022,7 @@ fn lowerToMiXEnc(
enc: Encoding,
code: *std.ArrayList(u8),
) InnerError!void {
+ assert(!tag.isAvx());
const modrm_ext = getModRmExt(tag).?;
const opc = getOpCode(tag, enc, reg_or_mem.size() == 8).?;
switch (reg_or_mem) {
@@ -2056,7 +2039,7 @@ fn lowerToMiXEnc(
.b = dst_reg.isExtended(),
});
opc.encode(encoder);
- encoder.modRm_direct(modrm_ext, dst_reg.lowId());
+ encoder.modRm_direct(modrm_ext, dst_reg.lowEnc());
encodeImm(encoder, imm, if (enc == .mi8) 8 else dst_reg.size());
},
.memory => |dst_mem| {
@@ -2095,84 +2078,43 @@ fn lowerToRmEnc(
reg_or_mem: RegisterOrMemory,
code: *std.ArrayList(u8),
) InnerError!void {
+ assert(!tag.isAvx());
const opc = getOpCode(tag, .rm, reg.size() == 8 or reg_or_mem.size() == 8).?;
switch (reg_or_mem) {
.register => |src_reg| {
- const encoder: Encoder = blk: {
- switch (reg) {
- .register => {
- const encoder = try Encoder.init(code, 4);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- encoder.rex(.{
- .w = setRexWRegister(reg) or setRexWRegister(src_reg),
- .r = reg.isExtended(),
- .b = src_reg.isExtended(),
- });
- break :blk encoder;
- },
- .avx_register => {
- const encoder = try Encoder.init(code, 5);
- var vex_prefix = getVexPrefix(tag, .rm).?;
- const vex = &vex_prefix.prefix;
- vex.rex(.{
- .r = reg.isExtended(),
- .b = src_reg.isExtended(),
- });
- encoder.vex(vex_prefix.prefix);
- break :blk encoder;
- },
- }
- };
+ const encoder = try Encoder.init(code, 5);
+ if (reg.size() == 16) {
+ encoder.prefix16BitMode();
+ }
+ encoder.rex(.{
+ .w = setRexWRegister(reg) or setRexWRegister(src_reg),
+ .r = reg.isExtended(),
+ .b = src_reg.isExtended(),
+ });
opc.encode(encoder);
- encoder.modRm_direct(reg.lowId(), src_reg.lowId());
+ encoder.modRm_direct(reg.lowEnc(), src_reg.lowEnc());
},
.memory => |src_mem| {
- const encoder: Encoder = blk: {
- switch (reg) {
- .register => {
- const encoder = try Encoder.init(code, 9);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- if (src_mem.base) |base| {
- // TODO handle 32-bit base register - requires prefix 0x67
- // Intel Manual, Vol 1, chapter 3.6 and 3.6.1
- encoder.rex(.{
- .w = setRexWRegister(reg),
- .r = reg.isExtended(),
- .b = base.isExtended(),
- });
- } else {
- encoder.rex(.{
- .w = setRexWRegister(reg),
- .r = reg.isExtended(),
- });
- }
- break :blk encoder;
- },
- .avx_register => {
- const encoder = try Encoder.init(code, 10);
- var vex_prefix = getVexPrefix(tag, .rm).?;
- const vex = &vex_prefix.prefix;
- if (src_mem.base) |base| {
- vex.rex(.{
- .r = reg.isExtended(),
- .b = base.isExtended(),
- });
- } else {
- vex.rex(.{
- .r = reg.isExtended(),
- });
- }
- encoder.vex(vex_prefix.prefix);
- break :blk encoder;
- },
- }
- };
+ const encoder = try Encoder.init(code, 9);
+ if (reg.size() == 16) {
+ encoder.prefix16BitMode();
+ }
+ if (src_mem.base) |base| {
+ // TODO handle 32-bit base register - requires prefix 0x67
+                // Intel Manual, Vol 1, chapters 3.6 and 3.6.1
+ encoder.rex(.{
+ .w = setRexWRegister(reg),
+ .r = reg.isExtended(),
+ .b = base.isExtended(),
+ });
+ } else {
+ encoder.rex(.{
+ .w = setRexWRegister(reg),
+ .r = reg.isExtended(),
+ });
+ }
opc.encode(encoder);
- src_mem.encode(encoder, reg.lowId());
+ src_mem.encode(encoder, reg.lowEnc());
},
}
}
@@ -2183,6 +2125,7 @@ fn lowerToMrEnc(
reg: Register,
code: *std.ArrayList(u8),
) InnerError!void {
+ assert(!tag.isAvx());
const opc = getOpCode(tag, .mr, reg.size() == 8 or reg_or_mem.size() == 8).?;
switch (reg_or_mem) {
.register => |dst_reg| {
@@ -2196,53 +2139,27 @@ fn lowerToMrEnc(
.b = dst_reg.isExtended(),
});
opc.encode(encoder);
- encoder.modRm_direct(reg.lowId(), dst_reg.lowId());
+ encoder.modRm_direct(reg.lowEnc(), dst_reg.lowEnc());
},
.memory => |dst_mem| {
- const encoder: Encoder = blk: {
- switch (reg) {
- .register => {
- const encoder = try Encoder.init(code, 9);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- if (dst_mem.base) |base| {
- encoder.rex(.{
- .w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
- .r = reg.isExtended(),
- .b = base.isExtended(),
- });
- } else {
- encoder.rex(.{
- .w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
- .r = reg.isExtended(),
- });
- }
- break :blk encoder;
- },
- .avx_register => {
- const encoder = try Encoder.init(code, 10);
- var vex_prefix = getVexPrefix(tag, .mr).?;
- const vex = &vex_prefix.prefix;
- if (dst_mem.base) |base| {
- vex.rex(.{
- .w = dst_mem.ptr_size == .qword_ptr,
- .r = reg.isExtended(),
- .b = base.isExtended(),
- });
- } else {
- vex.rex(.{
- .w = dst_mem.ptr_size == .qword_ptr,
- .r = reg.isExtended(),
- });
- }
- encoder.vex(vex_prefix.prefix);
- break :blk encoder;
- },
- }
- };
+ const encoder = try Encoder.init(code, 9);
+ if (reg.size() == 16) {
+ encoder.prefix16BitMode();
+ }
+ if (dst_mem.base) |base| {
+ encoder.rex(.{
+ .w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
+ .r = reg.isExtended(),
+ .b = base.isExtended(),
+ });
+ } else {
+ encoder.rex(.{
+ .w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
+ .r = reg.isExtended(),
+ });
+ }
opc.encode(encoder);
- dst_mem.encode(encoder, reg.lowId());
+ dst_mem.encode(encoder, reg.lowEnc());
},
}
}
@@ -2254,6 +2171,7 @@ fn lowerToRmiEnc(
imm: u32,
code: *std.ArrayList(u8),
) InnerError!void {
+ assert(!tag.isAvx());
const opc = getOpCode(tag, .rmi, false).?;
const encoder = try Encoder.init(code, 13);
if (reg.size() == 16) {
@@ -2267,7 +2185,7 @@ fn lowerToRmiEnc(
.b = src_reg.isExtended(),
});
opc.encode(encoder);
- encoder.modRm_direct(reg.lowId(), src_reg.lowId());
+ encoder.modRm_direct(reg.lowEnc(), src_reg.lowEnc());
},
.memory => |src_mem| {
if (src_mem.base) |base| {
@@ -2285,12 +2203,94 @@ fn lowerToRmiEnc(
});
}
opc.encode(encoder);
- src_mem.encode(encoder, reg.lowId());
+ src_mem.encode(encoder, reg.lowEnc());
},
}
encodeImm(encoder, imm, reg.size());
}
+/// Also referred to as the XM encoding in the Intel manual.
+fn lowerToVmEnc(
+ tag: Tag,
+ reg: Register,
+ reg_or_mem: RegisterOrMemory,
+ code: *std.ArrayList(u8),
+) InnerError!void {
+ const opc = getOpCode(tag, .vm, false).?;
+ var vex_prefix = getVexPrefix(tag, .vm).?;
+ const vex = &vex_prefix.prefix;
+ switch (reg_or_mem) {
+ .register => |src_reg| {
+ const encoder = try Encoder.init(code, 5);
+ vex.rex(.{
+ .r = reg.isExtended(),
+ .b = src_reg.isExtended(),
+ });
+ encoder.vex(vex_prefix.prefix);
+ opc.encode(encoder);
+ encoder.modRm_direct(reg.lowEnc(), src_reg.lowEnc());
+ },
+ .memory => |src_mem| {
+ assert(src_mem.ptr_size == .qword_ptr);
+ const encoder = try Encoder.init(code, 10);
+ if (src_mem.base) |base| {
+ vex.rex(.{
+ .r = reg.isExtended(),
+ .b = base.isExtended(),
+ });
+ } else {
+ vex.rex(.{
+ .r = reg.isExtended(),
+ });
+ }
+ encoder.vex(vex_prefix.prefix);
+ opc.encode(encoder);
+ src_mem.encode(encoder, reg.lowEnc());
+ },
+ }
+}
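+
+// For example (matching the AVX case removed from the RM encoding test below),
+// lowerToVmEnc(.vmovsd, .xmm1, RegisterOrMemory.rip(.qword_ptr, 0x10), code)
+// emits C5 FB 10 0D 10 00 00 00, i.e. `vmovsd xmm1, qword ptr [rip + 0x10]`.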
+
+/// Usually referred to as the MR encoding with V/V in the Intel manual.
+fn lowerToMvEnc(
+ tag: Tag,
+ reg_or_mem: RegisterOrMemory,
+ reg: Register,
+ code: *std.ArrayList(u8),
+) InnerError!void {
+ const opc = getOpCode(tag, .mv, false).?;
+ var vex_prefix = getVexPrefix(tag, .mv).?;
+ const vex = &vex_prefix.prefix;
+ switch (reg_or_mem) {
+ .register => |dst_reg| {
+ const encoder = try Encoder.init(code, 4);
+ vex.rex(.{
+ .r = reg.isExtended(),
+ .b = dst_reg.isExtended(),
+ });
+ encoder.vex(vex_prefix.prefix);
+ opc.encode(encoder);
+ encoder.modRm_direct(reg.lowEnc(), dst_reg.lowEnc());
+ },
+ .memory => |dst_mem| {
+ assert(dst_mem.ptr_size == .qword_ptr);
+ const encoder = try Encoder.init(code, 10);
+ if (dst_mem.base) |base| {
+ vex.rex(.{
+ .r = reg.isExtended(),
+ .b = base.isExtended(),
+ });
+ } else {
+ vex.rex(.{
+ .r = reg.isExtended(),
+ });
+ }
+ encoder.vex(vex_prefix.prefix);
+ opc.encode(encoder);
+ dst_mem.encode(encoder, reg.lowEnc());
+ },
+ }
+}
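+
+// Correspondingly (matching the AVX case removed from the MR encoding test below),
+// lowerToMvEnc(.vmovsd, RegisterOrMemory.rip(.qword_ptr, 0x10), .xmm1, code)
+// emits C5 FB 11 0D 10 00 00 00, i.e. `vmovsd qword ptr [rip + 0x10], xmm1`.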
+
fn lowerToRvmEnc(
tag: Tag,
reg1: Register,
@@ -2305,7 +2305,7 @@ fn lowerToRvmEnc(
.register => |reg3| {
if (vex_prefix.reg) |vvvv| {
switch (vvvv) {
- .nds => vex.reg(reg2.avx_register.id()),
+ .nds => vex.reg(reg2.enc()),
else => unreachable, // TODO
}
}
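                    // Per the Intel SDM, VEX.vvvv holds this operand in ones'
                    // complement (inverted) form; we assume vex.reg() applies
                    // that inversion internally.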
@@ -2316,7 +2316,7 @@ fn lowerToRvmEnc(
});
encoder.vex(vex_prefix.prefix);
opc.encode(encoder);
- encoder.modRm_direct(reg1.lowId(), reg3.lowId());
+ encoder.modRm_direct(reg1.lowEnc(), reg3.lowEnc());
},
.memory => |dst_mem| {
_ = dst_mem;
@@ -2341,7 +2341,7 @@ fn lowerToRvmiEnc(
.register => |reg3| {
if (vex_prefix.reg) |vvvv| {
switch (vvvv) {
- .nds => vex.reg(reg2.avx_register.id()),
+ .nds => vex.reg(reg2.enc()),
else => unreachable, // TODO
}
}
@@ -2352,7 +2352,7 @@ fn lowerToRvmiEnc(
});
encoder.vex(vex_prefix.prefix);
opc.encode(encoder);
- encoder.modRm_direct(reg1.lowId(), reg3.lowId());
+ encoder.modRm_direct(reg1.lowEnc(), reg3.lowEnc());
break :blk encoder;
},
.memory => |dst_mem| {
@@ -2490,23 +2490,23 @@ test "lower MI encoding" {
test "lower RM encoding" {
var emit = TestEmit.init();
defer emit.deinit();
- try lowerToRmEnc(.mov, Register.reg(.rax), RegisterOrMemory.reg(.rbx), emit.code());
+ try lowerToRmEnc(.mov, .rax, RegisterOrMemory.reg(.rbx), emit.code());
try expectEqualHexStrings("\x48\x8b\xc3", emit.lowered(), "mov rax, rbx");
- try lowerToRmEnc(.mov, Register.reg(.rax), RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0, .base = .r11 }), emit.code());
+ try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0, .base = .r11 }), emit.code());
try expectEqualHexStrings("\x49\x8b\x03", emit.lowered(), "mov rax, qword ptr [r11 + 0]");
- try lowerToRmEnc(.add, Register.reg(.r11), RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0x10000000 }), emit.code());
+ try lowerToRmEnc(.add, .r11, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0x10000000 }), emit.code());
try expectEqualHexStrings(
"\x4C\x03\x1C\x25\x00\x00\x00\x10",
emit.lowered(),
"add r11, qword ptr [ds:0x10000000]",
);
- try lowerToRmEnc(.add, Register.reg(.r12b), RegisterOrMemory.mem(.byte_ptr, .{ .disp = 0x10000000 }), emit.code());
+ try lowerToRmEnc(.add, .r12b, RegisterOrMemory.mem(.byte_ptr, .{ .disp = 0x10000000 }), emit.code());
try expectEqualHexStrings(
"\x44\x02\x24\x25\x00\x00\x00\x10",
emit.lowered(),
"add r11b, byte ptr [ds:0x10000000]",
);
- try lowerToRmEnc(.sub, Register.reg(.r11), RegisterOrMemory.mem(.qword_ptr, .{
+ try lowerToRmEnc(.sub, .r11, RegisterOrMemory.mem(.qword_ptr, .{
.disp = 0x10000000,
.base = .r13,
}), emit.code());
@@ -2515,7 +2515,7 @@ test "lower RM encoding" {
emit.lowered(),
"sub r11, qword ptr [r13 + 0x10000000]",
);
- try lowerToRmEnc(.sub, Register.reg(.r11), RegisterOrMemory.mem(.qword_ptr, .{
+ try lowerToRmEnc(.sub, .r11, RegisterOrMemory.mem(.qword_ptr, .{
.disp = 0x10000000,
.base = .r12,
}), emit.code());
@@ -2524,14 +2524,14 @@ test "lower RM encoding" {
emit.lowered(),
"sub r11, qword ptr [r12 + 0x10000000]",
);
- try lowerToRmEnc(.mov, Register.reg(.rax), RegisterOrMemory.mem(.qword_ptr, .{
+ try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{
.disp = @bitCast(u32, @as(i32, -4)),
.base = .rbp,
}), emit.code());
try expectEqualHexStrings("\x48\x8B\x45\xFC", emit.lowered(), "mov rax, qword ptr [rbp - 4]");
- try lowerToRmEnc(.lea, Register.reg(.rax), RegisterOrMemory.rip(.qword_ptr, 0x10), emit.code());
+ try lowerToRmEnc(.lea, .rax, RegisterOrMemory.rip(.qword_ptr, 0x10), emit.code());
try expectEqualHexStrings("\x48\x8D\x05\x10\x00\x00\x00", emit.lowered(), "lea rax, [rip + 0x10]");
- try lowerToRmEnc(.mov, Register.reg(.rax), RegisterOrMemory.mem(.qword_ptr, .{
+ try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{
.disp = @bitCast(u32, @as(i32, -8)),
.base = .rbp,
.scale_index = .{
@@ -2540,7 +2540,7 @@ test "lower RM encoding" {
},
}), emit.code());
try expectEqualHexStrings("\x48\x8B\x44\x0D\xF8", emit.lowered(), "mov rax, qword ptr [rbp + rcx*1 - 8]");
- try lowerToRmEnc(.mov, Register.reg(.eax), RegisterOrMemory.mem(.dword_ptr, .{
+ try lowerToRmEnc(.mov, .eax, RegisterOrMemory.mem(.dword_ptr, .{
.disp = @bitCast(u32, @as(i32, -4)),
.base = .rbp,
.scale_index = .{
@@ -2549,7 +2549,7 @@ test "lower RM encoding" {
},
}), emit.code());
try expectEqualHexStrings("\x8B\x44\x95\xFC", emit.lowered(), "mov eax, dword ptr [rbp + rdx*4 - 4]");
- try lowerToRmEnc(.mov, Register.reg(.rax), RegisterOrMemory.mem(.qword_ptr, .{
+ try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{
.disp = @bitCast(u32, @as(i32, -8)),
.base = .rbp,
.scale_index = .{
@@ -2558,7 +2558,7 @@ test "lower RM encoding" {
},
}), emit.code());
try expectEqualHexStrings("\x48\x8B\x44\xCD\xF8", emit.lowered(), "mov rax, qword ptr [rbp + rcx*8 - 8]");
- try lowerToRmEnc(.mov, Register.reg(.r8b), RegisterOrMemory.mem(.byte_ptr, .{
+ try lowerToRmEnc(.mov, .r8b, RegisterOrMemory.mem(.byte_ptr, .{
.disp = @bitCast(u32, @as(i32, -24)),
.base = .rsi,
.scale_index = .{
@@ -2567,7 +2567,7 @@ test "lower RM encoding" {
},
}), emit.code());
try expectEqualHexStrings("\x44\x8A\x44\x0E\xE8", emit.lowered(), "mov r8b, byte ptr [rsi + rcx*1 - 24]");
- try lowerToRmEnc(.lea, Register.reg(.rsi), RegisterOrMemory.mem(.qword_ptr, .{
+ try lowerToRmEnc(.lea, .rsi, RegisterOrMemory.mem(.qword_ptr, .{
.disp = 0,
.base = .rbp,
.scale_index = .{
@@ -2576,33 +2576,25 @@ test "lower RM encoding" {
},
}), emit.code());
try expectEqualHexStrings("\x48\x8D\x74\x0D\x00", emit.lowered(), "lea rsi, qword ptr [rbp + rcx*1 + 0]");
-
- // AVX extension tests
- try lowerToRmEnc(.vmovsd, Register.avxReg(.xmm1), RegisterOrMemory.rip(.qword_ptr, 0x10), emit.code());
- try expectEqualHexStrings(
- "\xC5\xFB\x10\x0D\x10\x00\x00\x00",
- emit.lowered(),
- "vmovsd xmm1, qword ptr [rip + 0x10]",
- );
}
test "lower MR encoding" {
var emit = TestEmit.init();
defer emit.deinit();
- try lowerToMrEnc(.mov, RegisterOrMemory.reg(.rax), Register.reg(.rbx), emit.code());
+ try lowerToMrEnc(.mov, RegisterOrMemory.reg(.rax), .rbx, emit.code());
try expectEqualHexStrings("\x48\x89\xd8", emit.lowered(), "mov rax, rbx");
try lowerToMrEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{
.disp = @bitCast(u32, @as(i32, -4)),
.base = .rbp,
- }), Register.reg(.r11), emit.code());
+ }), .r11, emit.code());
try expectEqualHexStrings("\x4c\x89\x5d\xfc", emit.lowered(), "mov qword ptr [rbp - 4], r11");
- try lowerToMrEnc(.add, RegisterOrMemory.mem(.byte_ptr, .{ .disp = 0x10000000 }), Register.reg(.r12b), emit.code());
+ try lowerToMrEnc(.add, RegisterOrMemory.mem(.byte_ptr, .{ .disp = 0x10000000 }), .r12b, emit.code());
try expectEqualHexStrings(
"\x44\x00\x24\x25\x00\x00\x00\x10",
emit.lowered(),
"add byte ptr [ds:0x10000000], r12b",
);
- try lowerToMrEnc(.add, RegisterOrMemory.mem(.dword_ptr, .{ .disp = 0x10000000 }), Register.reg(.r12d), emit.code());
+ try lowerToMrEnc(.add, RegisterOrMemory.mem(.dword_ptr, .{ .disp = 0x10000000 }), .r12d, emit.code());
try expectEqualHexStrings(
"\x44\x01\x24\x25\x00\x00\x00\x10",
emit.lowered(),
@@ -2611,61 +2603,53 @@ test "lower MR encoding" {
try lowerToMrEnc(.sub, RegisterOrMemory.mem(.qword_ptr, .{
.disp = 0x10000000,
.base = .r11,
- }), Register.reg(.r12), emit.code());
+ }), .r12, emit.code());
try expectEqualHexStrings(
"\x4D\x29\xA3\x00\x00\x00\x10",
emit.lowered(),
"sub qword ptr [r11 + 0x10000000], r12",
);
- try lowerToMrEnc(.mov, RegisterOrMemory.rip(.qword_ptr, 0x10), Register.reg(.r12), emit.code());
+ try lowerToMrEnc(.mov, RegisterOrMemory.rip(.qword_ptr, 0x10), .r12, emit.code());
try expectEqualHexStrings("\x4C\x89\x25\x10\x00\x00\x00", emit.lowered(), "mov qword ptr [rip + 0x10], r12");
-
- // AVX extension tests
- try lowerToMrEnc(.vmovsd, RegisterOrMemory.rip(.qword_ptr, 0x10), Register.avxReg(.xmm1), emit.code());
- try expectEqualHexStrings(
- "\xC5\xFB\x11\x0D\x10\x00\x00\x00",
- emit.lowered(),
- "vmovsd qword ptr [rip + 0x10], xmm1",
- );
}
test "lower OI encoding" {
var emit = TestEmit.init();
defer emit.deinit();
- try lowerToOiEnc(.mov, Register.reg(.rax), 0x1000000000000000, emit.code());
+ try lowerToOiEnc(.mov, .rax, 0x1000000000000000, emit.code());
try expectEqualHexStrings(
"\x48\xB8\x00\x00\x00\x00\x00\x00\x00\x10",
emit.lowered(),
"movabs rax, 0x1000000000000000",
);
- try lowerToOiEnc(.mov, Register.reg(.r11), 0x1000000000000000, emit.code());
+ try lowerToOiEnc(.mov, .r11, 0x1000000000000000, emit.code());
try expectEqualHexStrings(
"\x49\xBB\x00\x00\x00\x00\x00\x00\x00\x10",
emit.lowered(),
"movabs r11, 0x1000000000000000",
);
- try lowerToOiEnc(.mov, Register.reg(.r11d), 0x10000000, emit.code());
+ try lowerToOiEnc(.mov, .r11d, 0x10000000, emit.code());
try expectEqualHexStrings("\x41\xBB\x00\x00\x00\x10", emit.lowered(), "mov r11d, 0x10000000");
- try lowerToOiEnc(.mov, Register.reg(.r11w), 0x1000, emit.code());
+ try lowerToOiEnc(.mov, .r11w, 0x1000, emit.code());
try expectEqualHexStrings("\x66\x41\xBB\x00\x10", emit.lowered(), "mov r11w, 0x1000");
- try lowerToOiEnc(.mov, Register.reg(.r11b), 0x10, emit.code());
+ try lowerToOiEnc(.mov, .r11b, 0x10, emit.code());
try expectEqualHexStrings("\x41\xB3\x10", emit.lowered(), "mov r11b, 0x10");
}
test "lower FD/TD encoding" {
var emit = TestEmit.init();
defer emit.deinit();
- try lowerToFdEnc(.mov, Register.reg(.rax), 0x1000000000000000, emit.code());
+ try lowerToFdEnc(.mov, .rax, 0x1000000000000000, emit.code());
try expectEqualHexStrings(
"\x48\xa1\x00\x00\x00\x00\x00\x00\x00\x10",
emit.lowered(),
"mov rax, ds:0x1000000000000000",
);
- try lowerToFdEnc(.mov, Register.reg(.eax), 0x10000000, emit.code());
+ try lowerToFdEnc(.mov, .eax, 0x10000000, emit.code());
try expectEqualHexStrings("\xa1\x00\x00\x00\x10", emit.lowered(), "mov eax, ds:0x10000000");
- try lowerToFdEnc(.mov, Register.reg(.ax), 0x1000, emit.code());
+ try lowerToFdEnc(.mov, .ax, 0x1000, emit.code());
try expectEqualHexStrings("\x66\xa1\x00\x10", emit.lowered(), "mov ax, ds:0x1000");
- try lowerToFdEnc(.mov, Register.reg(.al), 0x10, emit.code());
+ try lowerToFdEnc(.mov, .al, 0x10, emit.code());
try expectEqualHexStrings("\xa0\x10", emit.lowered(), "mov al, ds:0x10");
}
@@ -2741,16 +2725,16 @@ test "lower M1 and MC encodings" {
test "lower O encoding" {
var emit = TestEmit.init();
defer emit.deinit();
- try lowerToOEnc(.pop, Register.reg(.r12), emit.code());
+ try lowerToOEnc(.pop, .r12, emit.code());
try expectEqualHexStrings("\x41\x5c", emit.lowered(), "pop r12");
- try lowerToOEnc(.push, Register.reg(.r12w), emit.code());
+ try lowerToOEnc(.push, .r12w, emit.code());
try expectEqualHexStrings("\x66\x41\x54", emit.lowered(), "push r12w");
}
test "lower RMI encoding" {
var emit = TestEmit.init();
defer emit.deinit();
- try lowerToRmiEnc(.imul, Register.reg(.rax), RegisterOrMemory.mem(.qword_ptr, .{
+ try lowerToRmiEnc(.imul, .rax, RegisterOrMemory.mem(.qword_ptr, .{
.disp = @bitCast(u32, @as(i32, -8)),
.base = .rbp,
}), 0x10, emit.code());
@@ -2759,39 +2743,49 @@ test "lower RMI encoding" {
emit.lowered(),
"imul rax, qword ptr [rbp - 8], 0x10",
);
- try lowerToRmiEnc(.imul, Register.reg(.eax), RegisterOrMemory.mem(.dword_ptr, .{
+ try lowerToRmiEnc(.imul, .eax, RegisterOrMemory.mem(.dword_ptr, .{
.disp = @bitCast(u32, @as(i32, -4)),
.base = .rbp,
}), 0x10, emit.code());
try expectEqualHexStrings("\x69\x45\xFC\x10\x00\x00\x00", emit.lowered(), "imul eax, dword ptr [rbp - 4], 0x10");
- try lowerToRmiEnc(.imul, Register.reg(.ax), RegisterOrMemory.mem(.word_ptr, .{
+ try lowerToRmiEnc(.imul, .ax, RegisterOrMemory.mem(.word_ptr, .{
.disp = @bitCast(u32, @as(i32, -2)),
.base = .rbp,
}), 0x10, emit.code());
try expectEqualHexStrings("\x66\x69\x45\xFE\x10\x00", emit.lowered(), "imul ax, word ptr [rbp - 2], 0x10");
- try lowerToRmiEnc(.imul, Register.reg(.r12), RegisterOrMemory.reg(.r12), 0x10, emit.code());
+ try lowerToRmiEnc(.imul, .r12, RegisterOrMemory.reg(.r12), 0x10, emit.code());
try expectEqualHexStrings("\x4D\x69\xE4\x10\x00\x00\x00", emit.lowered(), "imul r12, r12, 0x10");
- try lowerToRmiEnc(.imul, Register.reg(.r12w), RegisterOrMemory.reg(.r12w), 0x10, emit.code());
+ try lowerToRmiEnc(.imul, .r12w, RegisterOrMemory.reg(.r12w), 0x10, emit.code());
try expectEqualHexStrings("\x66\x45\x69\xE4\x10\x00", emit.lowered(), "imul r12w, r12w, 0x10");
}
-test "lower to RVM encoding" {
+test "lower MV encoding" {
var emit = TestEmit.init();
defer emit.deinit();
- try lowerToRvmEnc(
- .vaddsd,
- Register.avxReg(.xmm0),
- Register.avxReg(.xmm1),
- RegisterOrMemory.avxReg(.xmm2),
- emit.code(),
+ try lowerToMvEnc(.vmovsd, RegisterOrMemory.rip(.qword_ptr, 0x10), .xmm1, emit.code());
+ try expectEqualHexStrings(
+ "\xC5\xFB\x11\x0D\x10\x00\x00\x00",
+ emit.lowered(),
+ "vmovsd qword ptr [rip + 0x10], xmm1",
);
- try expectEqualHexStrings("\xC5\xF3\x58\xC2", emit.lowered(), "vaddsd xmm0, xmm1, xmm2");
- try lowerToRvmEnc(
- .vaddsd,
- Register.avxReg(.xmm0),
- Register.avxReg(.xmm0),
- RegisterOrMemory.avxReg(.xmm1),
- emit.code(),
+}
+
+test "lower VM encoding" {
+ var emit = TestEmit.init();
+ defer emit.deinit();
+ try lowerToVmEnc(.vmovsd, .xmm1, RegisterOrMemory.rip(.qword_ptr, 0x10), emit.code());
+ try expectEqualHexStrings(
+ "\xC5\xFB\x10\x0D\x10\x00\x00\x00",
+ emit.lowered(),
+ "vmovsd xmm1, qword ptr [rip + 0x10]",
);
+}
+
+test "lower to RVM encoding" {
+ var emit = TestEmit.init();
+ defer emit.deinit();
+ try lowerToRvmEnc(.vaddsd, .xmm0, .xmm1, RegisterOrMemory.reg(.xmm2), emit.code());
+ try expectEqualHexStrings("\xC5\xF3\x58\xC2", emit.lowered(), "vaddsd xmm0, xmm1, xmm2");
+ try lowerToRvmEnc(.vaddsd, .xmm0, .xmm0, RegisterOrMemory.reg(.xmm1), emit.code());
try expectEqualHexStrings("\xC5\xFB\x58\xC1", emit.lowered(), "vaddsd xmm0, xmm0, xmm1");
}
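
The MV and VM tests above encode the same vmovsd in both directions. For readers checking the expected byte strings by hand, here is a worked decode of the VM form; the field breakdown follows the Intel SDM and is a sketch, not code from this patch:

// C5 FB 10 0D 10 00 00 00   =>  vmovsd xmm1, qword ptr [rip + 0x10]
// C5           two-byte VEX escape
// FB           R-bar=1 (no REX.R extension), encoded vvvv=1111 (unused),
//              L=0 (128-bit), pp=11 (implied 0xF2 prefix, the "sd" variant)
// 10           MOVSD load opcode (the MV/store direction uses 0x11 instead)
// 0D           ModRM: mod=00, reg=001 (xmm1), rm=101 (RIP-relative + disp32)
// 10 00 00 00  disp32 = 0x10
//
// In the RVM test, the otherwise-unused vvvv field carries the first source
// operand: 0xF3 and 0xFB differ only in those inverted bits, xmm1 vs xmm0.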
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index ef50279d03..a1062ba6b4 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -14,8 +14,7 @@ const assert = std.debug.assert;
const bits = @import("bits.zig");
const Air = @import("../../Air.zig");
const CodeGen = @import("CodeGen.zig");
-const GpRegister = bits.Register;
-const AvxRegister = bits.AvxRegister;
+const Register = bits.Register;
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
@@ -23,11 +22,7 @@ extra: []const u32,
pub const Inst = struct {
tag: Tag,
- /// This is 3 fields, and the meaning of each depends on `tag`.
- /// reg1: Register
- /// reg2: Register
- /// flags: u2
- ops: u16,
+ ops: Ops,
/// The meaning of this depends on `tag` and `ops`.
data: Data,
@@ -397,6 +392,36 @@ pub const Inst = struct {
/// The position of an MIR instruction within the `Mir` instructions array.
pub const Index = u32;
+ /// Packed operand descriptor: two 7-bit `Register` values plus two flag
+ /// bits, replacing the old hand-rolled u16 shift-and-mask encoding.
+ pub const Ops = packed struct {
+ reg1: u7,
+ reg2: u7,
+ flags: u2,
+
+ pub fn encode(vals: struct {
+ reg1: Register = .none,
+ reg2: Register = .none,
+ flags: u2 = 0b00,
+ }) Ops {
+ return .{
+ .reg1 = @enumToInt(vals.reg1),
+ .reg2 = @enumToInt(vals.reg2),
+ .flags = vals.flags,
+ };
+ }
+
+ pub fn decode(ops: Ops) struct {
+ reg1: Register,
+ reg2: Register,
+ flags: u2,
+ } {
+ return .{
+ .reg1 = @intToEnum(Register, ops.reg1),
+ .reg2 = @intToEnum(Register, ops.reg2),
+ .flags = ops.flags,
+ };
+ }
+ };
+
/// All instructions have a 4-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
/// how to interpret the data within.
@@ -466,33 +491,6 @@ pub const DbgLineColumn = struct {
column: u32,
};
-pub fn Ops(comptime Reg1: type, comptime Reg2: type) type {
- return struct {
- reg1: Reg1 = .none,
- reg2: Reg2 = .none,
- flags: u2 = 0b00,
-
- pub fn encode(self: @This()) u16 {
- var ops: u16 = 0;
- ops |= @intCast(u16, @enumToInt(self.reg1)) << 9;
- ops |= @intCast(u16, @enumToInt(self.reg2)) << 2;
- ops |= self.flags;
- return ops;
- }
-
- pub fn decode(ops: u16) @This() {
- const reg1 = @intToEnum(Reg1, @truncate(u7, ops >> 9));
- const reg2 = @intToEnum(Reg2, @truncate(u7, ops >> 2));
- const flags = @truncate(u2, ops);
- return .{
- .reg1 = reg1,
- .reg2 = reg2,
- .flags = flags,
- };
- }
- };
-}
-
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
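
A usage sketch for the new packed `Ops`: the encode/decode pair round-trips a general-purpose and a SIMD register through the same 16 bits, which is the point of the unified enum. The test name and the `Mir` import path are assumptions; only declarations visible in the hunk above are used:

const std = @import("std");
const Mir = @import("Mir.zig");

test "Ops round-trips mixed register classes" {
    const ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax, .reg2 = .xmm1, .flags = 0b01 });
    const vals = ops.decode();
    try std.testing.expect(vals.reg1 == .rax); // general-purpose register
    try std.testing.expect(vals.reg2 == .xmm1); // SIMD register, same enum
    try std.testing.expect(vals.flags == 0b01);
}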
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index a53c82530d..85bf3a0790 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -3,7 +3,6 @@ const Type = @import("../../type.zig").Type;
const Target = std.Target;
const assert = std.debug.assert;
const Register = @import("bits.zig").Register;
-const AvxRegister = @import("bits.zig").AvxRegister;
pub const Class = enum { integer, sse, sseup, x87, x87up, complex_x87, memory, none };
@@ -379,11 +378,17 @@ pub const callee_preserved_regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 };
/// the caller relinquishes control to a subroutine via call instruction (or similar).
/// In other words, these registers are free to use by the callee.
pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 };
-pub const allocatable_registers = callee_preserved_regs ++ caller_preserved_regs;
-pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
-pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
-
-pub const avx_regs = [_]AvxRegister{
+pub const avx_regs = [_]Register{
.ymm0, .ymm1, .ymm2, .ymm3, .ymm4, .ymm5, .ymm6, .ymm7,
.ymm8, .ymm9, .ymm10, .ymm11, .ymm12, .ymm13, .ymm14, .ymm15,
};
+pub const allocatable_registers = callee_preserved_regs ++ caller_preserved_regs ++ avx_regs;
+
+// Masks for register manager
+const FreeRegInt = std.meta.Int(.unsigned, allocatable_registers.len);
+// TODO: derive these masks from the register arrays at comptime instead of hardcoding them.
+pub const gp_mask: FreeRegInt = 0x3fff;
+pub const avx_mask: FreeRegInt = 0x3fff_c000;
+
+pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
+pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
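
The two masks are position-based: bit i corresponds to allocatable_registers[i], so the 14 general-purpose registers occupy bits 0 through 13 and the 16 ymm registers bits 14 through 29. One way to discharge the TODO above, sketched here rather than taken from the patch (and assuming `std` is in scope in abi.zig), is a comptime check that keeps the masks from drifting apart from the arrays:

const gp_count = callee_preserved_regs.len + caller_preserved_regs.len; // 14
comptime {
    std.debug.assert(allocatable_registers.len == gp_count + avx_regs.len); // 30
    std.debug.assert(gp_mask == (1 << gp_count) - 1); // 0x3fff
    std.debug.assert(avx_mask == ((1 << avx_regs.len) - 1) << gp_count); // 0x3fff_c000
}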
diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig
index 0bba295c20..85bd190e2b 100644
--- a/src/arch/x86_64/bits.zig
+++ b/src/arch/x86_64/bits.zig
@@ -43,17 +43,36 @@ pub const Register = enum(u7) {
al, cl, dl, bl, ah, ch, dh, bh,
r8b, r9b, r10b, r11b, r12b, r13b, r14b, r15b,
- // Pseudo, used only for MIR to signify that the
- // operand is not a register but an immediate, etc.
+ // 64-79, 256-bit registers.
+ // id is int value - 64.
+ ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7,
+ ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15,
+
+ // 80-95, 128-bit registers.
+ // id is int value - 80.
+ xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
+ xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
+
+ // Pseudo-register, used only in MIR to signify that an operand
+ // is not a register (e.g. an immediate).
none,
+ /// Returns the 4-bit register ID. Registers that alias one another
+ /// (rax/eax/ax/al, or ymm0/xmm0) share an ID.
+ pub fn id(self: Register) u5 {
+ return switch (@enumToInt(self)) {
+ 0...63 => @as(u5, @truncate(u4, @enumToInt(self))),
+ 64...79 => @truncate(u5, @enumToInt(self)),
+ 80...95 => @truncate(u5, @enumToInt(self) - 80),
+ else => unreachable,
+ };
+ }
+
/// Returns the bit-width of the register.
- pub fn size(self: Register) u7 {
+ pub fn size(self: Register) u9 {
return switch (@enumToInt(self)) {
0...15 => 64,
16...31 => 32,
32...47 => 16,
- 48...64 => 8,
+ 48...63 => 8,
+ 64...79 => 256,
+ 80...95 => 128,
else => unreachable,
};
}
@@ -72,15 +91,23 @@ pub const Register = enum(u7) {
/// an instruction (@see isExtended), and requires special handling. The
/// lower three bits are often embedded directly in instructions (such as
/// the B8 variant of moves), or used in R/M bytes.
- pub fn id(self: Register) u4 {
+ pub fn enc(self: Register) u4 {
return @truncate(u4, @enumToInt(self));
}
- /// Like id, but only returns the lower 3 bits.
- pub fn lowId(self: Register) u3 {
+ /// Like enc, but only returns the lower 3 bits.
+ pub fn lowEnc(self: Register) u3 {
return @truncate(u3, @enumToInt(self));
}
+ /// Convert from any register to its 256-bit alias.
+ pub fn to256(self: Register) Register {
+ return @intToEnum(Register, @as(u8, self.id()) + 64);
+ }
+
+ /// Convert from any register to its 128-bit alias.
+ pub fn to128(self: Register) Register {
+ return @intToEnum(Register, @as(u8, self.id()) + 80);
+ }
+
/// Convert from any register to its 64 bit alias.
pub fn to64(self: Register) Register {
return @intToEnum(Register, self.id());
@@ -126,57 +153,6 @@ pub const Register = enum(u7) {
}
};
-/// AVX registers.
-/// TODO missing dwarfLocOp implementation.
-/// TODO add support for AVX-512
-pub const AvxRegister = enum(u6) {
- // 256-bit registers
- ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7,
- ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15,
-
- // 128-bit registers
- xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
- xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
-
- // Pseudo, used only for MIR to signify that the
- // operand is not a register but an immediate, etc.
- none,
-
- /// Returns the bit-width of the register.
- pub fn size(self: AvxRegister) u9 {
- return switch (@enumToInt(self)) {
- 0...15 => 256,
- 16...31 => 128,
- else => unreachable,
- };
- }
-
- /// Returns whether the register is *extended*.
- pub fn isExtended(self: AvxRegister) bool {
- return @enumToInt(self) & 0x08 != 0;
- }
-
- /// This returns the 4-bit register ID.
- pub fn id(self: AvxRegister) u4 {
- return @truncate(u4, @enumToInt(self));
- }
-
- /// Like id, but only returns the lower 3 bits.
- pub fn lowId(self: AvxRegister) u3 {
- return @truncate(u3, @enumToInt(self));
- }
-
- /// Convert from any register to its 256 bit alias.
- pub fn to256(self: AvxRegister) AvxRegister {
- return @intToEnum(AvxRegister, self.id());
- }
-
- /// Convert from any register to its 128 bit alias.
- pub fn to128(self: AvxRegister) AvxRegister {
- return @intToEnum(AvxRegister, @as(u8, self.id()) + 16);
- }
-};
-
// zig fmt: on
/// Encoding helper functions for x86_64 instructions
@@ -792,7 +768,7 @@ test "Encoder helpers - Vex prefix" {
{
stream.reset();
var vex_prefix = Encoder.Vex{};
- vex_prefix.reg(AvxRegister.xmm15.id());
+ vex_prefix.reg(Register.xmm15.enc());
const nwritten = vex_prefix.write(writer);
try testing.expectEqualSlices(u8, &[_]u8{ 0xc5, 0x80 }, buf[0..nwritten]);
}
@@ -832,7 +808,7 @@ test "Encoder helpers - Vex prefix" {
vex.simd_prefix_66();
encoder.vex(vex); // emit the two-byte VEX prefix
encoder.opcode_1byte(0x28);
- encoder.modRm_direct(0, AvxRegister.xmm1.lowId());
+ encoder.modRm_direct(0, Register.xmm1.lowEnc());
try testing.expectEqualSlices(u8, &[_]u8{ 0xC5, 0xF9, 0x28, 0xC1 }, code.items);
}
@@ -846,10 +822,10 @@ test "Encoder helpers - Vex prefix" {
vex.simd_prefix_66();
vex.lead_opc_0f();
vex.rex(.{ .r = true });
- vex.reg(AvxRegister.xmm1.id());
+ vex.reg(Register.xmm1.enc());
encoder.vex(vex);
encoder.opcode_1byte(0x16);
- encoder.modRm_RIPDisp32(AvxRegister.xmm13.lowId());
+ encoder.modRm_RIPDisp32(Register.xmm13.lowEnc());
encoder.disp32(0);
try testing.expectEqualSlices(u8, &[_]u8{ 0xC5, 0x71, 0x16, 0x2D, 0x00, 0x00, 0x00, 0x00 }, code.items);
}
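
Taken together, the bits.zig changes mean one enum now answers both aliasing and encoding questions. A sketch of the expected behaviour, as hypothetical test checks consistent with the `id`, `enc` and `size` mappings above:

test "unified Register aliasing" {
    const expect = @import("std").testing.expect;
    try expect(Register.xmm7.to256() == .ymm7); // id 7, plus ymm base 64
    try expect(Register.ymm15.to128() == .xmm15); // id 15, plus xmm base 80
    try expect(Register.ecx.to64() == .rcx); // GP aliasing is unchanged
    try expect(Register.ymm3.size() == 256);
    try expect(Register.xmm3.enc() == Register.ymm3.enc()); // same 4-bit encoding
}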