author    Andrew Kelley <andrew@ziglang.org>  2022-01-04 01:53:02 -0500
committer GitHub <noreply@github.com>  2022-01-04 01:53:02 -0500
commit    76fd6fc36505a203fba77e2103db12be59514532 (patch)
tree      43e1ee8b8f5d933fabc06fa32c846dd9c26a4983 /src
parent    4e38b3ed9ba6969e5b6c9e65125a5dbd49726a0b (diff)
parent    a8ff51b0920fdc7edcaca332dd2c9fab6f497ca9 (diff)
Merge pull request #10503 from ziglang/stage2-x86_64-zig-test
stage2: enable "zig test" on x86_64
Diffstat (limited to 'src')
-rw-r--r--  src/arch/x86_64/CodeGen.zig  424
-rw-r--r--  src/arch/x86_64/Isel.zig     671
-rw-r--r--  src/arch/x86_64/Mir.zig        1
3 files changed, 628 insertions, 468 deletions
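
This merge turns on "zig test" for the self-hosted x86_64 backend; the hunks below fill in the codegen the test runner needs (error-union unwrapping for zero-bit payloads, isErr/isNonErr, slice and struct field access, calls through runtime-known function pointers). A test along these lines exercises several of those paths — an illustrative sketch, not a program from the commit:

    fn maybeErr() !void {
        return error.Nope;
    }

    test "catch an error with a zero-bit payload" {
        maybeErr() catch |err| {
            if (err != error.Nope) unreachable; // ErrorSet compare, newly handled in airCmp
            return;
        };
        unreachable;
    }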
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 51da348265..b8d832c5cf 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -415,9 +415,14 @@ fn gen(self: *Self) InnerError!void {
try self.genBody(self.air.getMainBody());
- if (self.exitlude_jump_relocs.items.len == 1) {
- self.mir_instructions.len -= 1;
- } else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
+ // TODO can single exitlude jump reloc be elided? What if it is not at the end of the code?
+ // Example:
+ // pub fn main() void {
+ // maybeErr() catch return;
+ // unreachable;
+ // }
+ // Eliding the reloc will cause a miscompilation in this case.
+ for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.items(.data)[jmp_reloc].inst = @intCast(u32, self.mir_instructions.len);
}
@@ -1180,19 +1185,24 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement unwrap error union error for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const err_union_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = err_union_ty.errorUnionPayload();
+ const mcv = try self.resolveInst(ty_op.operand);
+ if (!payload_ty.hasCodeGenBits()) break :result mcv;
+ return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const err_union_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = err_union_ty.errorUnionPayload();
+ if (!payload_ty.hasCodeGenBits()) break :result MCValue.none;
+ return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1261,10 +1271,14 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement slice_len for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const operand = try self.resolveInst(ty_op.operand);
+ const dst_mcv: MCValue = switch (operand) {
+ .stack_offset => |off| MCValue{ .stack_offset = off + 8 },
+ else => return self.fail("TODO implement slice_len for {}", .{operand}),
+ };
+ break :result dst_mcv;
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1289,10 +1303,57 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement slice_elem_val for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else result: {
+ const slice_mcv = try self.resolveInst(bin_op.lhs);
+ const slice_ty = self.air.typeOf(bin_op.lhs);
+
+ const elem_ty = slice_ty.childType();
+ const elem_size = elem_ty.abiSize(self.target.*);
+
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
+
+ const index_ty = self.air.typeOf(bin_op.rhs);
+ const index_mcv: MCValue = blk: {
+ switch (try self.resolveInst(bin_op.rhs)) {
+ .register => |reg| {
+ if (reg.to64() != .rcx) {
+ try self.register_manager.getReg(.rcx, inst);
+ }
+ break :blk MCValue{ .register = .rcx };
+ },
+ else => return self.fail("TODO move index mcv into a register", .{}),
+ }
+ };
+
+ try self.genIMulOpMir(index_ty, index_mcv, .{ .immediate = elem_size });
+
+ const dst_mcv = blk: {
+ switch (slice_mcv) {
+ .stack_offset => |unadjusted_off| {
+ const dst_mcv = try self.allocRegOrMem(inst, false);
+ const addr_reg = try self.register_manager.allocReg(null, &.{index_mcv.register});
+ const slice_ptr_abi_size = @intCast(u32, slice_ptr_field_type.abiSize(self.target.*));
+ const off = unadjusted_off + elem_size;
+ // lea reg, [rbp - 8 + rcx*1]
+ _ = try self.addInst(.{
+ .tag = .lea,
+ .ops = (Mir.Ops{
+ .reg1 = registerAlias(addr_reg, slice_ptr_abi_size),
+ .reg2 = .rbp,
+ .flags = 0b11,
+ }).encode(),
+ .data = .{ .imm = -@intCast(i32, off) },
+ });
+ try self.load(dst_mcv, .{ .register = addr_reg }, slice_ptr_field_type);
+ break :blk dst_mcv;
+ },
+ else => return self.fail("TODO implement slice_elem_val when slice is {}", .{slice_mcv}),
+ }
+ };
+
+ break :result dst_mcv;
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1427,10 +1488,9 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.embedded_in_code => {
return self.fail("TODO implement loading from MCValue.embedded_in_code", .{});
},
- .register => |reg| try self.setRegOrMem(elem_ty, dst_mcv, .{ .register = reg }),
+ .register => |reg| try self.setRegOrMem(ptr_ty, dst_mcv, .{ .register = reg }),
.memory => |addr| {
- const reg = try self.register_manager.allocReg(null, &.{});
- try self.genSetReg(ptr_ty, reg, .{ .memory = addr });
+ const reg = try self.copyToTmpRegister(ptr_ty, .{ .memory = addr });
try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
},
.stack_offset => {
@@ -1505,28 +1565,60 @@ fn airStore(self: *Self, inst: Air.Inst.Index) !void {
fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
- return self.structFieldPtr(extra.struct_operand, ty_pl.ty, extra.field_index);
+ const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index);
+ return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- return self.structFieldPtr(ty_op.operand, ty_op.ty, index);
+ const result = try self.structFieldPtr(inst, ty_op.operand, index);
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn structFieldPtr(self: *Self, operand: Air.Inst.Ref, ty: Air.Inst.Ref, index: u32) !void {
- _ = self;
- _ = operand;
- _ = ty;
- _ = index;
- return self.fail("TODO implement codegen struct_field_ptr", .{});
- //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none });
+
+fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
+ return if (self.liveness.isUnused(inst)) .dead else result: {
+ const mcv = try self.resolveInst(operand);
+ const struct_ty = self.air.typeOf(operand).childType();
+ const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_ty = struct_ty.structFieldType(index);
+ const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
+
+ switch (mcv) {
+ .ptr_stack_offset => |off| {
+ break :result MCValue{
+ .ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size,
+ };
+ },
+ else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}),
+ }
+ };
}
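
For orientation (inferred from the lea arithmetic in genSetStack further down, not stated explicitly in the commit): a stack_offset n names the slot ending at rbp - n, so a value of abi size s occupies [rbp - n - s, rbp - n). Worked example with assumed numbers: a struct of size 16 at ptr_stack_offset 8 spans [rbp - 24, rbp - 8); its field at struct offset 4 with size 4 spans [rbp - 20, rbp - 16), giving offset 8 + 16 - 4 - 4 = 16, which is exactly the expression used above (and again in airStructFieldVal below).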
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
- _ = extra;
- return self.fail("TODO implement codegen struct_field_val", .{});
- //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none });
+ const operand = extra.struct_operand;
+ const index = extra.field_index;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const mcv = try self.resolveInst(operand);
+ const struct_ty = self.air.typeOf(operand);
+ const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_ty = struct_ty.structFieldType(index);
+ const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
+
+ switch (mcv) {
+ .stack_offset => |off| {
+ break :result MCValue{
+ .stack_offset = off + struct_size - struct_field_offset - struct_field_size,
+ };
+ },
+ else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}),
+ }
+ };
+
+ return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
/// Perform "binary" operators, excluding comparisons.
@@ -1778,6 +1870,7 @@ fn genIMulOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !
.data = .{ .imm = @intCast(i32, imm) },
});
} else {
+ // TODO verify we don't spill and assign to the same register as dst_mcv
const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv);
return self.genIMulOpMir(dst_ty, dst_mcv, MCValue{ .register = src_reg });
}
@@ -1887,10 +1980,16 @@ fn airFence(self: *Self) !void {
fn airCall(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
- const fn_ty = self.air.typeOf(pl_op.operand);
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const ty = self.air.typeOf(callee);
+
+ const fn_ty = switch (ty.zigTypeTag()) {
+ .Fn => ty,
+ .Pointer => ty.childType(),
+ else => unreachable,
+ };
var info = try self.resolveCallingConventionValues(fn_ty);
defer info.deinit(self);
@@ -1957,7 +2056,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
- return self.fail("TODO implement calling runtime known function pointer", .{});
+ assert(ty.zigTypeTag() == .Pointer);
+ const mcv = try self.resolveInst(callee);
+ try self.genSetReg(Type.initTag(.usize), .rax, mcv);
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
for (info.args) |mc_arg, arg_i| {
@@ -2000,7 +2101,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
const func = func_payload.data;
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
- try self.genSetReg(Type.initTag(.u64), .rax, .{
+ try self.genSetReg(Type.initTag(.usize), .rax, .{
.memory = func.owner_decl.link.macho.local_sym_index,
});
// callq *%rax
@@ -2024,7 +2125,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
- return self.fail("TODO implement calling runtime known function pointer", .{});
+ assert(ty.zigTypeTag() == .Pointer);
+ const mcv = try self.resolveInst(callee);
+ try self.genSetReg(Type.initTag(.usize), .rax, mcv);
}
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
for (info.args) |mc_arg, arg_i| {
@@ -2142,12 +2245,19 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- if (self.liveness.isUnused(inst))
+
+ if (self.liveness.isUnused(inst)) {
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
const ty = self.air.typeOf(bin_op.lhs);
- assert(ty.eql(self.air.typeOf(bin_op.rhs)));
- if (ty.zigTypeTag() == .ErrorSet)
- return self.fail("TODO implement cmp for errors", .{});
+ const signedness: std.builtin.Signedness = blk: {
+ // For non-int types, we treat the values as unsigned
+ if (ty.zigTypeTag() != .Int) break :blk .unsigned;
+
+ // Otherwise, we take the signedness of the actual int
+ break :blk ty.intInfo(self.target.*).signedness;
+ };
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -2163,9 +2273,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const src_mcv = try self.limitImmediateType(bin_op.rhs, i32);
try self.genBinMathOpMir(.cmp, ty, dst_mcv, src_mcv);
- break :result switch (ty.isSignedInt()) {
- true => MCValue{ .compare_flags_signed = op },
- false => MCValue{ .compare_flags_unsigned = op },
+ break :result switch (signedness) {
+ .signed => MCValue{ .compare_flags_signed = op },
+ .unsigned => MCValue{ .compare_flags_unsigned = op },
};
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -2274,6 +2384,9 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const parent_registers = self.register_manager.registers;
try self.branch_stack.append(.{});
+ errdefer {
+ _ = self.branch_stack.pop();
+ }
try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
for (liveness_condbr.then_deaths) |operand| {
@@ -2393,19 +2506,35 @@ fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
}
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
- _ = ty;
- _ = operand;
- // Here you can specialize this instruction if it makes sense to, otherwise the default
- // will call isNonErr and invert the result.
- return self.fail("TODO call isNonErr and invert the result", .{});
+ const err_type = ty.errorUnionSet();
+ const payload_type = ty.errorUnionPayload();
+ if (!err_type.hasCodeGenBits()) {
+ return MCValue{ .immediate = 0 }; // always false
+ } else if (!payload_type.hasCodeGenBits()) {
+ if (err_type.abiSize(self.target.*) <= 8) {
+ try self.genBinMathOpMir(.cmp, err_type, operand, MCValue{ .immediate = 0 });
+ return MCValue{ .compare_flags_unsigned = .gt };
+ } else {
+ return self.fail("TODO isErr for errors with size larger than register size", .{});
+ }
+ } else {
+ return self.fail("TODO isErr for non-empty payloads", .{});
+ }
}
fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
- _ = ty;
- _ = operand;
- // Here you can specialize this instruction if it makes sense to, otherwise the default
- // will call isErr and invert the result.
- return self.fail("TODO call isErr and invert the result", .{});
+ const is_err_res = try self.isErr(ty, operand);
+ switch (is_err_res) {
+ .compare_flags_unsigned => |op| {
+ assert(op == .gt);
+ return MCValue{ .compare_flags_unsigned = .lte };
+ },
+ .immediate => |imm| {
+ assert(imm == 0);
+ return MCValue{ .immediate = 1 };
+ },
+ else => unreachable,
+ }
}
fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
@@ -2877,12 +3006,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
},
}
},
- .embedded_in_code => {
- // TODO this and `.stack_offset` below need to get improved to support types greater than
- // register size, and do general memcpy
- const reg = try self.copyToTmpRegister(ty, mcv);
- return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
- },
.register => |reg| {
if (stack_offset > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
@@ -2899,23 +3022,137 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.data = .{ .imm = -@intCast(i32, adj_off) },
});
},
- .memory => |vaddr| {
- _ = vaddr;
- return self.fail("TODO implement set stack variable from memory vaddr", .{});
+ .memory, .embedded_in_code => {
+ if (ty.abiSize(self.target.*) <= 8) {
+ const reg = try self.copyToTmpRegister(ty, mcv);
+ return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
+ }
+ return self.fail("TODO implement memcpy for setting stack from {}", .{mcv});
},
- .stack_offset => |off| {
- // TODO this and `.embedded_in_code` above need to get improved to support types greater than
- // register size, and do general memcpy
+ .stack_offset => |unadjusted_off| {
+ if (stack_offset == unadjusted_off) {
+ // Copy stack variable to itself; nothing to do.
+ return;
+ }
+
+ const abi_size = ty.abiSize(self.target.*);
+ if (abi_size <= 8) {
+ const reg = try self.copyToTmpRegister(ty, mcv);
+ return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
+ }
+
+ const regs = try self.register_manager.allocRegs(2, .{ null, null }, &.{});
+ const addr_reg = regs[0];
+ const len_reg = regs[1];
+
+ const off = unadjusted_off + abi_size;
+ _ = try self.addInst(.{
+ .tag = .lea,
+ .ops = (Mir.Ops{
+ .reg1 = addr_reg.to64(),
+ .reg2 = .rbp,
+ }).encode(),
+ .data = .{ .imm = -@intCast(i32, off) },
+ });
- if (stack_offset == off)
- return; // Copy stack variable to itself; nothing to do.
+ // TODO allow for abi_size to be u64
+ try self.genSetReg(Type.initTag(.u32), len_reg, .{ .immediate = @intCast(u32, abi_size) });
- const reg = try self.copyToTmpRegister(ty, mcv);
- return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
+ return self.genInlineMemcpy(-@intCast(i32, off), addr_reg.to64(), len_reg.to64());
},
}
}
+fn genInlineMemcpy(self: *Self, stack_offset: i32, addr_reg: Register, len_reg: Register) InnerError!void {
+ try self.register_manager.getReg(.rax, null);
+ try self.register_manager.getReg(.rcx, null);
+ const tmp_reg = try self.register_manager.allocReg(null, &.{ addr_reg, len_reg, .rax, .rcx });
+
+ // mov rcx, 0
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .ops = (Mir.Ops{
+ .reg1 = .rcx,
+ }).encode(),
+ .data = .{ .imm = 0 },
+ });
+
+ // mov rax, 0
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .ops = (Mir.Ops{
+ .reg1 = .rax,
+ }).encode(),
+ .data = .{ .imm = 0 },
+ });
+
+ // loop:
+ // cmp rcx, len
+ const loop_start = try self.addInst(.{
+ .tag = .cmp,
+ .ops = (Mir.Ops{
+ .reg1 = .rcx,
+ .reg2 = len_reg,
+ }).encode(),
+ .data = undefined,
+ });
+
+    // exit the loop when rcx >= len
+ const loop_reloc = try self.addInst(.{
+ .tag = .cond_jmp_above_below,
+ .ops = (Mir.Ops{ .flags = 0b00 }).encode(),
+ .data = .{ .inst = undefined },
+ });
+
+ // mov tmp, [addr + rcx]
+ _ = try self.addInst(.{
+ .tag = .mov_scale_src,
+ .ops = (Mir.Ops{
+ .reg1 = tmp_reg.to8(),
+ .reg2 = addr_reg,
+ }).encode(),
+ .data = .{ .imm = 0 },
+ });
+
+ // mov [stack_offset + rax], tmp
+ _ = try self.addInst(.{
+ .tag = .mov_scale_dst,
+ .ops = (Mir.Ops{
+ .reg1 = .rbp,
+ .reg2 = tmp_reg.to8(),
+ }).encode(),
+ .data = .{ .imm = stack_offset },
+ });
+
+ // add rcx, 1
+ _ = try self.addInst(.{
+ .tag = .add,
+ .ops = (Mir.Ops{
+ .reg1 = .rcx,
+ }).encode(),
+ .data = .{ .imm = 1 },
+ });
+
+ // add rax, 1
+ _ = try self.addInst(.{
+ .tag = .add,
+ .ops = (Mir.Ops{
+ .reg1 = .rax,
+ }).encode(),
+ .data = .{ .imm = 1 },
+ });
+
+ // jmp loop
+ _ = try self.addInst(.{
+ .tag = .jmp,
+ .ops = (Mir.Ops{ .flags = 0b00 }).encode(),
+ .data = .{ .inst = loop_start },
+ });
+
+ // end:
+ try self.performReloc(loop_reloc);
+}
+
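genInlineMemcpy above emits a forward byte-by-byte copy: rcx indexes the source, rax the destination, both starting at zero and incrementing in lockstep. A minimal Zig sketch of the loop's semantics (illustrative only, not part of the commit):

    fn inlineMemcpySemantics(dst: [*]u8, src: [*]u8, len: usize) void {
        var i: usize = 0; // rcx/rax in the emitted code, kept in lockstep
        while (i < len) : (i += 1) { // cmp rcx, len; exit when i >= len
            dst[i] = src[i]; // mov tmp, [src + rcx]; mov [dst + rax], tmp
        }
    }
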
/// Set pointee via pointer stored in a register.
/// mov [reg], value
fn genSetPtrReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
@@ -3436,31 +3673,32 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
}
},
.ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
+ const err_name = typed_value.val.castTag(.@"error").?.data.name;
+ const module = self.bin_file.options.module.?;
+ const global_error_set = module.global_error_set;
+ const error_index = global_error_set.get(err_name).?;
+ return MCValue{ .immediate = error_index };
},
.ErrorUnion => {
const error_type = typed_value.ty.errorUnionSet();
const payload_type = typed_value.ty.errorUnionPayload();
- const sub_val = typed_value.val.castTag(.eu_payload).?.data;
- if (!payload_type.hasCodeGenBits()) {
- // We use the error type directly as the type.
- return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
+ if (typed_value.val.castTag(.eu_payload)) |pl| {
+ if (!payload_type.hasCodeGenBits()) {
+ // We use the error type directly as the type.
+ return MCValue{ .immediate = 0 };
+ }
+
+ _ = pl;
+ return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty});
+ } else {
+ if (!payload_type.hasCodeGenBits()) {
+ // We use the error type directly as the type.
+ return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val });
+ }
}
- return self.fail("TODO implement error union const of type '{}'", .{typed_value.ty});
+ return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty});
},
else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}),
}
@@ -3557,8 +3795,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
.Naked => unreachable,
.Unspecified, .C => {
const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
- const aliased_reg = registerAlias(c_abi_int_return_regs[0], ret_ty_size);
- result.return_value = .{ .register = aliased_reg };
+ if (ret_ty_size <= 8) {
+ const aliased_reg = registerAlias(c_abi_int_return_regs[0], ret_ty_size);
+ result.return_value = .{ .register = aliased_reg };
+ } else {
+ return self.fail("TODO support more return types for x86_64 backend", .{});
+ }
},
else => return self.fail("TODO implement function return values for {}", .{cc}),
}
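
The Isel.zig changes that follow reshape how memory operands are built: RegisterOrMemory.mem now takes the pointer size first and the rest as a struct literal, making room for an optional scale-index pair, and the repeated ModRM/SIB emission in the lowerTo*Enc functions is folded into a single Memory.encode helper. Schematically (both signatures appear in the diff):

    // old positional form, removed below
    _ = RegisterOrMemory.mem(.rbp, -8, .qword_ptr);
    // new struct-literal form, added below
    _ = RegisterOrMemory.mem(.qword_ptr, .{ .disp = -8, .base = .rbp });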
diff --git a/src/arch/x86_64/Isel.zig b/src/arch/x86_64/Isel.zig
index faacf1ab3c..e6d8278fdc 100644
--- a/src/arch/x86_64/Isel.zig
+++ b/src/arch/x86_64/Isel.zig
@@ -227,8 +227,10 @@ fn mirPushPop(isel: *Isel, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
16 => .word_ptr,
else => .qword_ptr,
};
- return lowerToMEnc(tag, RegisterOrMemory.mem(ops.reg1, imm, ptr_size), isel.code) catch |err|
- isel.failWithLoweringError(err);
+ return lowerToMEnc(tag, RegisterOrMemory.mem(ptr_size, .{
+ .disp = imm,
+ .base = ops.reg1,
+ }), isel.code) catch |err| isel.failWithLoweringError(err);
},
0b10 => {
// PUSH imm32
@@ -284,7 +286,7 @@ fn mirJmpCall(isel: *Isel, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
16 => .word_ptr,
else => .qword_ptr,
};
- return lowerToMEnc(tag, RegisterOrMemory.mem(null, imm, ptr_size), isel.code) catch |err|
+ return lowerToMEnc(tag, RegisterOrMemory.mem(ptr_size, .{ .disp = imm }), isel.code) catch |err|
isel.failWithLoweringError(err);
}
// JMP/CALL reg
@@ -422,12 +424,10 @@ fn mirArith(isel: *Isel, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
// RM
const imm = isel.mir.instructions.items(.data)[inst].imm;
const src_reg: ?Register = if (ops.reg2 == .none) null else ops.reg2;
- return lowerToRmEnc(
- tag,
- ops.reg1,
- RegisterOrMemory.mem(src_reg, imm, Memory.PtrSize.fromBits(ops.reg1.size())),
- isel.code,
- ) catch |err| isel.failWithLoweringError(err);
+ return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg1.size()), .{
+ .disp = imm,
+ .base = src_reg,
+ }), isel.code) catch |err| isel.failWithLoweringError(err);
},
0b10 => {
if (ops.reg2 == .none) {
@@ -436,12 +436,10 @@ fn mirArith(isel: *Isel, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
// mov [reg1 + imm32], reg2
// MR
const imm = isel.mir.instructions.items(.data)[inst].imm;
- return lowerToMrEnc(
- tag,
- RegisterOrMemory.mem(ops.reg1, imm, Memory.PtrSize.fromBits(ops.reg2.size())),
- ops.reg2,
- isel.code,
- ) catch |err| isel.failWithLoweringError(err);
+ return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg2.size()), .{
+ .disp = imm,
+ .base = ops.reg1,
+ }), ops.reg2, isel.code) catch |err| isel.failWithLoweringError(err);
},
0b11 => {
return isel.fail("TODO unused variant: mov reg1, reg2, 0b11", .{});
@@ -460,12 +458,10 @@ fn mirArithMemImm(isel: *Isel, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
0b10 => .dword_ptr,
0b11 => .qword_ptr,
};
- return lowerToMiEnc(
- tag,
- RegisterOrMemory.mem(ops.reg1, imm_pair.dest_off, ptr_size),
- imm_pair.operand,
- isel.code,
- ) catch |err| isel.failWithLoweringError(err);
+ return lowerToMiEnc(tag, RegisterOrMemory.mem(ptr_size, .{
+ .disp = imm_pair.dest_off,
+ .base = ops.reg1,
+ }), imm_pair.operand, isel.code) catch |err| isel.failWithLoweringError(err);
}
inline fn setRexWRegister(reg: Register) bool {
@@ -492,103 +488,61 @@ inline fn immOpSize(imm: i64) u8 {
return 64;
}
-// TODO
fn mirArithScaleSrc(isel: *Isel, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = Mir.Ops.decode(isel.mir.instructions.items(.ops)[inst]);
const scale = ops.flags;
- // OP reg1, [reg2 + scale*rcx + imm32]
- const opc = getOpCode(tag, .rm, ops.reg1.size() == 8).?;
const imm = isel.mir.instructions.items(.data)[inst].imm;
- const encoder = try Encoder.init(isel.code, 8);
- encoder.rex(.{
- .w = ops.reg1.size() == 64,
- .r = ops.reg1.isExtended(),
- .b = ops.reg2.isExtended(),
- });
- opc.encode(encoder);
- if (imm <= math.maxInt(i8)) {
- encoder.modRm_SIBDisp8(ops.reg1.lowId());
- encoder.sib_scaleIndexBaseDisp8(scale, Register.rcx.lowId(), ops.reg2.lowId());
- encoder.disp8(@intCast(i8, imm));
- } else {
- encoder.modRm_SIBDisp32(ops.reg1.lowId());
- encoder.sib_scaleIndexBaseDisp32(scale, Register.rcx.lowId(), ops.reg2.lowId());
- encoder.disp32(imm);
- }
+ // OP reg1, [reg2 + scale*rcx + imm32]
+ return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg1.size()), .{
+ .disp = imm,
+ .base = ops.reg2,
+ .scale_index = .{
+ .scale = scale,
+ .index = .rcx,
+ },
+ }), isel.code) catch |err| isel.failWithLoweringError(err);
}
-// TODO
fn mirArithScaleDst(isel: *Isel, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = Mir.Ops.decode(isel.mir.instructions.items(.ops)[inst]);
const scale = ops.flags;
const imm = isel.mir.instructions.items(.data)[inst].imm;
-
if (ops.reg2 == .none) {
- // OP [reg1 + scale*rax + 0], imm32
- const opc = getOpCode(tag, .mi, ops.reg1.size() == 8).?;
- const modrm_ext = getModRmExt(tag).?;
- const encoder = try Encoder.init(isel.code, 8);
- encoder.rex(.{
- .w = ops.reg1.size() == 64,
- .b = ops.reg1.isExtended(),
- });
- opc.encode(encoder);
- encoder.modRm_SIBDisp0(modrm_ext);
- encoder.sib_scaleIndexBase(scale, Register.rax.lowId(), ops.reg1.lowId());
- if (imm <= math.maxInt(i8)) {
- encoder.imm8(@intCast(i8, imm));
- } else if (imm <= math.maxInt(i16)) {
- encoder.imm16(@intCast(i16, imm));
- } else {
- encoder.imm32(imm);
- }
- return;
+ // OP qword ptr [reg1 + scale*rax + 0], imm32
+ return lowerToMiEnc(tag, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = 0,
+ .base = ops.reg1,
+ .scale_index = .{
+ .scale = scale,
+ .index = .rax,
+ },
+ }), imm, isel.code) catch |err| isel.failWithLoweringError(err);
}
-
// OP [reg1 + scale*rax + imm32], reg2
- const opc = getOpCode(tag, .mr, ops.reg1.size() == 8).?;
- const encoder = try Encoder.init(isel.code, 8);
- encoder.rex(.{
- .w = ops.reg1.size() == 64,
- .r = ops.reg2.isExtended(),
- .b = ops.reg1.isExtended(),
- });
- opc.encode(encoder);
- if (imm <= math.maxInt(i8)) {
- encoder.modRm_SIBDisp8(ops.reg2.lowId());
- encoder.sib_scaleIndexBaseDisp8(scale, Register.rax.lowId(), ops.reg1.lowId());
- encoder.disp8(@intCast(i8, imm));
- } else {
- encoder.modRm_SIBDisp32(ops.reg2.lowId());
- encoder.sib_scaleIndexBaseDisp32(scale, Register.rax.lowId(), ops.reg1.lowId());
- encoder.disp32(imm);
- }
+ return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg2.size()), .{
+ .disp = imm,
+ .base = ops.reg1,
+ .scale_index = .{
+ .scale = scale,
+ .index = .rax,
+ },
+ }), ops.reg2, isel.code) catch |err| isel.failWithLoweringError(err);
}
-// TODO
fn mirArithScaleImm(isel: *Isel, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = Mir.Ops.decode(isel.mir.instructions.items(.ops)[inst]);
const scale = ops.flags;
const payload = isel.mir.instructions.items(.data)[inst].payload;
const imm_pair = isel.mir.extraData(Mir.ImmPair, payload).data;
- const opc = getOpCode(tag, .mi, ops.reg1.size() == 8).?;
- const modrm_ext = getModRmExt(tag).?;
- const encoder = try Encoder.init(isel.code, 2);
- encoder.rex(.{
- .w = ops.reg1.size() == 64,
- .b = ops.reg1.isExtended(),
- });
- opc.encode(encoder);
- if (imm_pair.dest_off <= math.maxInt(i8)) {
- encoder.modRm_SIBDisp8(modrm_ext);
- encoder.sib_scaleIndexBaseDisp8(scale, Register.rax.lowId(), ops.reg1.lowId());
- encoder.disp8(@intCast(i8, imm_pair.dest_off));
- } else {
- encoder.modRm_SIBDisp32(modrm_ext);
- encoder.sib_scaleIndexBaseDisp32(scale, Register.rax.lowId(), ops.reg1.lowId());
- encoder.disp32(imm_pair.dest_off);
- }
- encoder.imm32(imm_pair.operand);
+ // OP qword ptr [reg1 + scale*rax + imm32], imm32
+ return lowerToMiEnc(tag, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = imm_pair.dest_off,
+ .base = ops.reg1,
+ .scale_index = .{
+ .scale = scale,
+ .index = .rax,
+ },
+ }), imm_pair.operand, isel.code) catch |err| isel.failWithLoweringError(err);
}
fn mirMovabs(isel: *Isel, inst: Mir.Inst.Index) InnerError!void {
@@ -646,7 +600,10 @@ fn mirLea(isel: *Isel, inst: Mir.Inst.Index) InnerError!void {
return lowerToRmEnc(
.lea,
ops.reg1,
- RegisterOrMemory.mem(src_reg, imm, Memory.PtrSize.fromBits(ops.reg1.size())),
+ RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg1.size()), .{
+ .disp = imm,
+ .base = src_reg,
+ }),
isel.code,
) catch |err| isel.failWithLoweringError(err);
},
@@ -657,7 +614,7 @@ fn mirLea(isel: *Isel, inst: Mir.Inst.Index) InnerError!void {
lowerToRmEnc(
.lea,
ops.reg1,
- RegisterOrMemory.rip(0, Memory.PtrSize.fromBits(ops.reg1.size())),
+ RegisterOrMemory.rip(Memory.PtrSize.fromBits(ops.reg1.size()), 0),
isel.code,
) catch |err| return isel.failWithLoweringError(err);
const end_offset = isel.code.items.len;
@@ -673,7 +630,7 @@ fn mirLea(isel: *Isel, inst: Mir.Inst.Index) InnerError!void {
lowerToRmEnc(
.lea,
ops.reg1,
- RegisterOrMemory.rip(0, Memory.PtrSize.fromBits(ops.reg1.size())),
+ RegisterOrMemory.rip(Memory.PtrSize.fromBits(ops.reg1.size()), 0),
isel.code,
) catch |err| return isel.failWithLoweringError(err);
const end_offset = isel.code.items.len;
@@ -697,7 +654,24 @@ fn mirLea(isel: *Isel, inst: Mir.Inst.Index) InnerError!void {
);
}
},
- 0b11 => return isel.fail("TODO unused variant lea reg1, reg2, 0b11", .{}),
+ 0b11 => {
+ // lea reg, [rbp + rcx + imm32]
+ const imm = isel.mir.instructions.items(.data)[inst].imm;
+ const src_reg: ?Register = if (ops.reg2 == .none) null else ops.reg2;
+ return lowerToRmEnc(
+ .lea,
+ ops.reg1,
+ RegisterOrMemory.mem(Memory.PtrSize.fromBits(ops.reg1.size()), .{
+ .disp = imm,
+ .base = src_reg,
+ .scale_index = .{
+ .scale = 0,
+ .index = .rcx,
+ },
+ }),
+ isel.code,
+ ) catch |err| isel.failWithLoweringError(err);
+ },
}
}
@@ -1230,18 +1204,17 @@ inline fn getModRmExt(tag: Tag) ?u3 {
};
}
-const ScaleIndexBase = struct {
+const ScaleIndex = struct {
scale: u2,
- index_reg: ?Register,
- base_reg: ?Register,
+ index: Register,
};
const Memory = struct {
- reg: ?Register,
+ base: ?Register,
rip: bool = false,
disp: i32,
ptr_size: PtrSize,
- sib: ?ScaleIndexBase = null,
+ scale_index: ?ScaleIndex = null,
const PtrSize = enum {
byte_ptr,
@@ -1269,8 +1242,72 @@ const Memory = struct {
};
}
};
+
+ fn encode(mem_op: Memory, encoder: Encoder, operand: u3) void {
+ if (mem_op.base) |base| {
+ const dst = base.lowId();
+ const src = operand;
+ if (dst == 4 or mem_op.scale_index != null) {
+ if (mem_op.disp == 0) {
+ encoder.modRm_SIBDisp0(src);
+ if (mem_op.scale_index) |si| {
+ encoder.sib_scaleIndexBase(si.scale, si.index.lowId(), dst);
+ } else {
+ encoder.sib_base(dst);
+ }
+ } else if (immOpSize(mem_op.disp) == 8) {
+ encoder.modRm_SIBDisp8(src);
+ if (mem_op.scale_index) |si| {
+ encoder.sib_scaleIndexBaseDisp8(si.scale, si.index.lowId(), dst);
+ } else {
+ encoder.sib_baseDisp8(dst);
+ }
+ encoder.disp8(@intCast(i8, mem_op.disp));
+ } else {
+ encoder.modRm_SIBDisp32(src);
+ if (mem_op.scale_index) |si| {
+ encoder.sib_scaleIndexBaseDisp32(si.scale, si.index.lowId(), dst);
+ } else {
+ encoder.sib_baseDisp32(dst);
+ }
+ encoder.disp32(mem_op.disp);
+ }
+ } else {
+ if (mem_op.disp == 0) {
+ encoder.modRm_indirectDisp0(src, dst);
+ } else if (immOpSize(mem_op.disp) == 8) {
+ encoder.modRm_indirectDisp8(src, dst);
+ encoder.disp8(@intCast(i8, mem_op.disp));
+ } else {
+ encoder.modRm_indirectDisp32(src, dst);
+ encoder.disp32(mem_op.disp);
+ }
+ }
+ } else {
+ if (mem_op.rip) {
+ encoder.modRm_RIPDisp32(operand);
+ } else {
+ encoder.modRm_SIBDisp0(operand);
+ if (mem_op.scale_index) |si| {
+ encoder.sib_scaleIndexDisp32(si.scale, si.index.lowId());
+ } else {
+ encoder.sib_disp32();
+ }
+ }
+ encoder.disp32(mem_op.disp);
+ }
+ }
};
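
ScaleIndex.scale is the raw two-bit SIB scale field, so the effective multiplier on the index register is 2^scale (0 → *1, 1 → *2, 2 → *4, 3 → *8), which the updated encoding tests further down confirm. A minimal construction mirroring those tests:

    // qword ptr [rbp + rcx*1 - 8]: scale = 0 encodes a multiplier of 1
    _ = RegisterOrMemory.mem(.qword_ptr, .{
        .disp = -8,
        .base = .rbp,
        .scale_index = .{ .scale = 0, .index = .rcx },
    });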
+fn encodeImm(encoder: Encoder, imm: i32, size: u64) void {
+ switch (size) {
+ 8 => encoder.imm8(@intCast(i8, imm)),
+ 16 => encoder.imm16(@intCast(i16, imm)),
+ 32, 64 => encoder.imm32(imm),
+ else => unreachable,
+ }
+}
+
const RegisterOrMemory = union(enum) {
register: Register,
memory: Memory,
@@ -1279,20 +1316,25 @@ const RegisterOrMemory = union(enum) {
return .{ .register = register };
}
- fn mem(register: ?Register, disp: i32, ptr_size: Memory.PtrSize) RegisterOrMemory {
+ fn mem(ptr_size: Memory.PtrSize, args: struct {
+ disp: i32,
+ base: ?Register = null,
+ scale_index: ?ScaleIndex = null,
+ }) RegisterOrMemory {
return .{
.memory = .{
- .reg = register,
- .disp = disp,
+ .base = args.base,
+ .disp = args.disp,
.ptr_size = ptr_size,
+ .scale_index = args.scale_index,
},
};
}
- fn rip(disp: i32, ptr_size: Memory.PtrSize) RegisterOrMemory {
+ fn rip(ptr_size: Memory.PtrSize, disp: i32) RegisterOrMemory {
return .{
.memory = .{
- .reg = null,
+ .base = null,
.rip = true,
.disp = disp,
.ptr_size = ptr_size,
@@ -1325,16 +1367,10 @@ fn lowerToIEnc(tag: Tag, imm: i32, code: *std.ArrayList(u8)) LoweringError!void
const opc = getOpCode(tag, .i, immOpSize(imm) == 8).?;
const encoder = try Encoder.init(code, 5);
if (immOpSize(imm) == 16) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
opc.encode(encoder);
- if (immOpSize(imm) == 8) {
- encoder.imm8(@intCast(i8, imm));
- } else if (immOpSize(imm) == 16) {
- encoder.imm16(@intCast(i16, imm));
- } else {
- encoder.imm32(imm);
- }
+ encodeImm(encoder, imm, immOpSize(imm));
}
fn lowerToOEnc(tag: Tag, reg: Register, code: *std.ArrayList(u8)) LoweringError!void {
@@ -1344,7 +1380,7 @@ fn lowerToOEnc(tag: Tag, reg: Register, code: *std.ArrayList(u8)) LoweringError!
const opc = getOpCode(tag, .o, false).?;
const encoder = try Encoder.init(code, 3);
if (reg.size() == 16) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
encoder.rex(.{
.w = false,
@@ -1375,7 +1411,7 @@ fn lowerToMEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8))
}
const encoder = try Encoder.init(code, 4);
if (reg.size() == 16) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
encoder.rex(.{
.w = switch (reg) {
@@ -1393,51 +1429,19 @@ fn lowerToMEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8))
}
const encoder = try Encoder.init(code, 8);
if (mem_op.ptr_size == .word_ptr) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
- if (mem_op.reg) |reg| {
- if (reg.size() != 64) {
+ if (mem_op.base) |base| {
+ if (base.size() != 64) {
return error.OperandSizeMismatch;
}
encoder.rex(.{
.w = false,
- .b = reg.isExtended(),
+ .b = base.isExtended(),
});
- opc.encode(encoder);
- if (reg.lowId() == 4) {
- if (mem_op.disp == 0) {
- encoder.modRm_SIBDisp0(modrm_ext);
- encoder.sib_base(reg.lowId());
- } else if (immOpSize(mem_op.disp) == 8) {
- encoder.modRm_SIBDisp8(modrm_ext);
- encoder.sib_baseDisp8(reg.lowId());
- encoder.disp8(@intCast(i8, mem_op.disp));
- } else {
- encoder.modRm_SIBDisp32(modrm_ext);
- encoder.sib_baseDisp32(reg.lowId());
- encoder.disp32(mem_op.disp);
- }
- } else {
- if (mem_op.disp == 0) {
- encoder.modRm_indirectDisp0(modrm_ext, reg.lowId());
- } else if (immOpSize(mem_op.disp) == 8) {
- encoder.modRm_indirectDisp8(modrm_ext, reg.lowId());
- encoder.disp8(@intCast(i8, mem_op.disp));
- } else {
- encoder.modRm_indirectDisp32(modrm_ext, reg.lowId());
- encoder.disp32(mem_op.disp);
- }
- }
- } else {
- opc.encode(encoder);
- if (mem_op.rip) {
- encoder.modRm_RIPDisp32(modrm_ext);
- } else {
- encoder.modRm_SIBDisp0(modrm_ext);
- encoder.sib_disp32();
- }
- encoder.disp32(mem_op.disp);
}
+ opc.encode(encoder);
+ mem_op.encode(encoder, modrm_ext);
},
}
}
@@ -1463,7 +1467,7 @@ fn lowerToTdFdEnc(tag: Tag, reg: Register, moffs: i64, code: *std.ArrayList(u8),
getOpCode(tag, .fd, reg.size() == 8).?;
const encoder = try Encoder.init(code, 10);
if (reg.size() == 16) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
encoder.rex(.{
.w = setRexWRegister(reg),
@@ -1496,7 +1500,7 @@ fn lowerToOiEnc(tag: Tag, reg: Register, imm: i64, code: *std.ArrayList(u8)) Low
const opc = getOpCode(tag, .oi, reg.size() == 8).?;
const encoder = try Encoder.init(code, 10);
if (reg.size() == 16) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
encoder.rex(.{
.w = setRexWRegister(reg),
@@ -1533,7 +1537,7 @@ fn lowerToMiEnc(tag: Tag, reg_or_mem: RegisterOrMemory, imm: i32, code: *std.Arr
// 0x66 prefix switches to the non-default size; here we assume a switch from
// the default 32bits to 16bits operand-size.
// More info: https://www.cs.uni-potsdam.de/desn/lehre/ss15/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf#page=32&zoom=auto,-159,773
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
encoder.rex(.{
.w = setRexWRegister(dst_reg),
@@ -1541,81 +1545,30 @@ fn lowerToMiEnc(tag: Tag, reg_or_mem: RegisterOrMemory, imm: i32, code: *std.Arr
});
opc.encode(encoder);
encoder.modRm_direct(modrm_ext, dst_reg.lowId());
- switch (dst_reg.size()) {
- 8 => {
- const imm8 = try math.cast(i8, imm);
- encoder.imm8(imm8);
- },
- 16 => {
- const imm16 = try math.cast(i16, imm);
- encoder.imm16(imm16);
- },
- 32, 64 => encoder.imm32(imm),
- else => unreachable,
- }
+ encodeImm(encoder, imm, dst_reg.size());
},
.memory => |dst_mem| {
const opc = getOpCode(tag, .mi, dst_mem.ptr_size == .byte_ptr).?;
const encoder = try Encoder.init(code, 12);
if (dst_mem.ptr_size == .word_ptr) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
- if (dst_mem.reg) |dst_reg| {
- if (dst_reg.size() != 64) {
+ if (dst_mem.base) |base| {
+ if (base.size() != 64) {
return error.OperandSizeMismatch;
}
encoder.rex(.{
.w = dst_mem.ptr_size == .qword_ptr,
- .b = dst_reg.isExtended(),
+ .b = base.isExtended(),
});
- opc.encode(encoder);
- if (dst_reg.lowId() == 4) {
- if (dst_mem.disp == 0) {
- encoder.modRm_SIBDisp0(modrm_ext);
- encoder.sib_base(dst_reg.lowId());
- } else if (immOpSize(dst_mem.disp) == 8) {
- encoder.modRm_SIBDisp8(modrm_ext);
- encoder.sib_baseDisp8(dst_reg.lowId());
- encoder.disp8(@intCast(i8, dst_mem.disp));
- } else {
- encoder.modRm_SIBDisp32(modrm_ext);
- encoder.sib_baseDisp32(dst_reg.lowId());
- encoder.disp32(dst_mem.disp);
- }
- } else {
- if (dst_mem.disp == 0) {
- encoder.modRm_indirectDisp0(modrm_ext, dst_reg.lowId());
- } else if (immOpSize(dst_mem.disp) == 8) {
- encoder.modRm_indirectDisp8(modrm_ext, dst_reg.lowId());
- encoder.disp8(@intCast(i8, dst_mem.disp));
- } else {
- encoder.modRm_indirectDisp32(modrm_ext, dst_reg.lowId());
- encoder.disp32(dst_mem.disp);
- }
- }
} else {
- opc.encode(encoder);
- if (dst_mem.rip) {
- encoder.modRm_RIPDisp32(modrm_ext);
- } else {
- encoder.modRm_SIBDisp0(modrm_ext);
- encoder.sib_disp32();
- }
- encoder.disp32(dst_mem.disp);
- }
- switch (dst_mem.ptr_size) {
- .byte_ptr => {
- const imm8 = try math.cast(i8, imm);
- encoder.imm8(imm8);
- },
- .word_ptr => {
- const imm16 = try math.cast(i16, imm);
- encoder.imm16(imm16);
- },
- .dword_ptr, .qword_ptr => {
- encoder.imm32(imm);
- },
+ encoder.rex(.{
+ .w = dst_mem.ptr_size == .qword_ptr,
+ });
}
+ opc.encode(encoder);
+ dst_mem.encode(encoder, modrm_ext);
+ encodeImm(encoder, imm, dst_mem.ptr_size.size());
},
}
}
@@ -1647,58 +1600,27 @@ fn lowerToRmEnc(
}
const encoder = try Encoder.init(code, 9);
if (reg.size() == 16) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
- if (src_mem.reg) |src_reg| {
+ if (src_mem.base) |base| {
// TODO handle 32-bit base register - requires prefix 0x67
// Intel Manual, Vol 1, chapter 3.6 and 3.6.1
- if (src_reg.size() != 64) {
+ if (base.size() != 64) {
return error.OperandSizeMismatch;
}
encoder.rex(.{
.w = setRexWRegister(reg),
.r = reg.isExtended(),
- .b = src_reg.isExtended(),
+ .b = base.isExtended(),
});
- opc.encode(encoder);
- if (src_reg.lowId() == 4) {
- if (src_mem.disp == 0) {
- encoder.modRm_SIBDisp0(reg.lowId());
- encoder.sib_base(src_reg.lowId());
- } else if (immOpSize(src_mem.disp) == 8) {
- encoder.modRm_SIBDisp8(reg.lowId());
- encoder.sib_baseDisp8(src_reg.lowId());
- encoder.disp8(@intCast(i8, src_mem.disp));
- } else {
- encoder.modRm_SIBDisp32(reg.lowId());
- encoder.sib_baseDisp32(src_reg.lowId());
- encoder.disp32(src_mem.disp);
- }
- } else {
- if (src_mem.disp == 0) {
- encoder.modRm_indirectDisp0(reg.lowId(), src_reg.lowId());
- } else if (immOpSize(src_mem.disp) == 8) {
- encoder.modRm_indirectDisp8(reg.lowId(), src_reg.lowId());
- encoder.disp8(@intCast(i8, src_mem.disp));
- } else {
- encoder.modRm_indirectDisp32(reg.lowId(), src_reg.lowId());
- encoder.disp32(src_mem.disp);
- }
- }
} else {
encoder.rex(.{
.w = setRexWRegister(reg),
.r = reg.isExtended(),
});
- opc.encode(encoder);
- if (src_mem.rip) {
- encoder.modRm_RIPDisp32(reg.lowId());
- } else {
- encoder.modRm_SIBDisp0(reg.lowId());
- encoder.sib_disp32();
- }
- encoder.disp32(src_mem.disp);
}
+ opc.encode(encoder);
+ src_mem.encode(encoder, reg.lowId());
},
}
}
@@ -1730,56 +1652,25 @@ fn lowerToMrEnc(
}
const encoder = try Encoder.init(code, 9);
if (reg.size() == 16) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
- if (dst_mem.reg) |dst_reg| {
- if (dst_reg.size() != 64) {
+ if (dst_mem.base) |base| {
+ if (base.size() != 64) {
return error.OperandSizeMismatch;
}
encoder.rex(.{
.w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
.r = reg.isExtended(),
- .b = dst_reg.isExtended(),
+ .b = base.isExtended(),
});
- opc.encode(encoder);
- if (dst_reg.lowId() == 4) {
- if (dst_mem.disp == 0) {
- encoder.modRm_SIBDisp0(reg.lowId());
- encoder.sib_base(dst_reg.lowId());
- } else if (immOpSize(dst_mem.disp) == 8) {
- encoder.modRm_SIBDisp8(reg.lowId());
- encoder.sib_baseDisp8(dst_reg.lowId());
- encoder.disp8(@intCast(i8, dst_mem.disp));
- } else {
- encoder.modRm_SIBDisp32(reg.lowId());
- encoder.sib_baseDisp32(dst_reg.lowId());
- encoder.disp32(dst_mem.disp);
- }
- } else {
- if (dst_mem.disp == 0) {
- encoder.modRm_indirectDisp0(reg.lowId(), dst_reg.lowId());
- } else if (immOpSize(dst_mem.disp) == 8) {
- encoder.modRm_indirectDisp8(reg.lowId(), dst_reg.lowId());
- encoder.disp8(@intCast(i8, dst_mem.disp));
- } else {
- encoder.modRm_indirectDisp32(reg.lowId(), dst_reg.lowId());
- encoder.disp32(dst_mem.disp);
- }
- }
} else {
encoder.rex(.{
.w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
.r = reg.isExtended(),
});
- opc.encode(encoder);
- if (dst_mem.rip) {
- encoder.modRm_RIPDisp32(reg.lowId());
- } else {
- encoder.modRm_SIBDisp0(reg.lowId());
- encoder.sib_disp32();
- }
- encoder.disp32(dst_mem.disp);
}
+ opc.encode(encoder);
+ dst_mem.encode(encoder, reg.lowId());
},
}
}
@@ -1797,7 +1688,7 @@ fn lowerToRmiEnc(
const opc = getOpCode(tag, .rmi, false).?;
const encoder = try Encoder.init(code, 13);
if (reg.size() == 16) {
- encoder.opcode_1byte(0x66);
+ encoder.prefix16BitMode();
}
switch (reg_or_mem) {
.register => |src_reg| {
@@ -1813,10 +1704,10 @@ fn lowerToRmiEnc(
encoder.modRm_direct(reg.lowId(), src_reg.lowId());
},
.memory => |src_mem| {
- if (src_mem.reg) |src_reg| {
+ if (src_mem.base) |base| {
// TODO handle 32-bit base register - requires prefix 0x67
// Intel Manual, Vol 1, chapter 3.6 and 3.6.1
- if (src_reg.size() != 64) {
+ if (base.size() != 64) {
return error.OperandSizeMismatch;
}
if (src_mem.ptr_size == .byte_ptr) {
@@ -1825,59 +1716,19 @@ fn lowerToRmiEnc(
encoder.rex(.{
.w = setRexWRegister(reg),
.r = reg.isExtended(),
- .b = src_reg.isExtended(),
+ .b = base.isExtended(),
});
- opc.encode(encoder);
- if (src_reg.lowId() == 4) {
- if (src_mem.disp == 0) {
- encoder.modRm_SIBDisp0(reg.lowId());
- encoder.sib_base(src_reg.lowId());
- } else if (immOpSize(src_mem.disp) == 8) {
- encoder.modRm_SIBDisp8(reg.lowId());
- encoder.sib_baseDisp8(src_reg.lowId());
- encoder.disp8(@intCast(i8, src_mem.disp));
- } else {
- encoder.modRm_SIBDisp32(reg.lowId());
- encoder.sib_baseDisp32(src_reg.lowId());
- encoder.disp32(src_mem.disp);
- }
- } else {
- if (src_mem.disp == 0) {
- encoder.modRm_indirectDisp0(reg.lowId(), src_reg.lowId());
- } else if (immOpSize(src_mem.disp) == 8) {
- encoder.modRm_indirectDisp8(reg.lowId(), src_reg.lowId());
- encoder.disp8(@intCast(i8, src_mem.disp));
- } else {
- encoder.modRm_indirectDisp32(reg.lowId(), src_reg.lowId());
- encoder.disp32(src_mem.disp);
- }
- }
} else {
encoder.rex(.{
.w = setRexWRegister(reg),
.r = reg.isExtended(),
});
- opc.encode(encoder);
- if (src_mem.rip) {
- encoder.modRm_RIPDisp32(reg.lowId());
- } else {
- encoder.modRm_SIBDisp0(reg.lowId());
- encoder.sib_disp32();
- }
- encoder.disp32(src_mem.disp);
}
+ opc.encode(encoder);
+ src_mem.encode(encoder, reg.lowId());
},
}
- switch (reg.size()) {
- // TODO 8bit immediate
- 8 => unreachable,
- 16 => {
- const imm16 = try math.cast(i16, imm);
- encoder.imm16(imm16);
- },
- 32, 64 => encoder.imm32(imm),
- else => unreachable,
- }
+ encodeImm(encoder, imm, reg.size());
}
fn expectEqualHexStrings(expected: []const u8, given: []const u8, assembly: []const u8) !void {
@@ -1930,44 +1781,62 @@ test "lower MI encoding" {
defer isel.deinit();
try lowerToMiEnc(.mov, RegisterOrMemory.reg(.rax), 0x10, isel.code());
try expectEqualHexStrings("\x48\xc7\xc0\x10\x00\x00\x00", isel.lowered(), "mov rax, 0x10");
- try lowerToMiEnc(.mov, RegisterOrMemory.mem(.r11, 0, .dword_ptr), 0x10, isel.code());
+ try lowerToMiEnc(.mov, RegisterOrMemory.mem(.dword_ptr, .{ .disp = 0, .base = .r11 }), 0x10, isel.code());
try expectEqualHexStrings("\x41\xc7\x03\x10\x00\x00\x00", isel.lowered(), "mov dword ptr [r11 + 0], 0x10");
- try lowerToMiEnc(.add, RegisterOrMemory.mem(.rdx, -8, .dword_ptr), 0x10, isel.code());
+ try lowerToMiEnc(.add, RegisterOrMemory.mem(.dword_ptr, .{ .disp = -8, .base = .rdx }), 0x10, isel.code());
try expectEqualHexStrings("\x81\x42\xF8\x10\x00\x00\x00", isel.lowered(), "add dword ptr [rdx - 8], 0x10");
- try lowerToMiEnc(.sub, RegisterOrMemory.mem(.r11, 0x10000000, .dword_ptr), 0x10, isel.code());
+ try lowerToMiEnc(.sub, RegisterOrMemory.mem(.dword_ptr, .{
+ .disp = 0x10000000,
+ .base = .r11,
+ }), 0x10, isel.code());
try expectEqualHexStrings(
"\x41\x81\xab\x00\x00\x00\x10\x10\x00\x00\x00",
isel.lowered(),
"sub dword ptr [r11 + 0x10000000], 0x10",
);
- try lowerToMiEnc(.@"and", RegisterOrMemory.mem(null, 0x10000000, .dword_ptr), 0x10, isel.code());
+ try lowerToMiEnc(.@"and", RegisterOrMemory.mem(.dword_ptr, .{ .disp = 0x10000000 }), 0x10, isel.code());
try expectEqualHexStrings(
"\x81\x24\x25\x00\x00\x00\x10\x10\x00\x00\x00",
isel.lowered(),
"and dword ptr [ds:0x10000000], 0x10",
);
- try lowerToMiEnc(.@"and", RegisterOrMemory.mem(.r12, 0x10000000, .dword_ptr), 0x10, isel.code());
+ try lowerToMiEnc(.@"and", RegisterOrMemory.mem(.dword_ptr, .{
+ .disp = 0x10000000,
+ .base = .r12,
+ }), 0x10, isel.code());
try expectEqualHexStrings(
"\x41\x81\xA4\x24\x00\x00\x00\x10\x10\x00\x00\x00",
isel.lowered(),
"and dword ptr [r12 + 0x10000000], 0x10",
);
- try lowerToMiEnc(.mov, RegisterOrMemory.rip(0x10, .qword_ptr), 0x10, isel.code());
+ try lowerToMiEnc(.mov, RegisterOrMemory.rip(.qword_ptr, 0x10), 0x10, isel.code());
try expectEqualHexStrings(
- "\xC7\x05\x10\x00\x00\x00\x10\x00\x00\x00",
+ "\x48\xC7\x05\x10\x00\x00\x00\x10\x00\x00\x00",
isel.lowered(),
"mov qword ptr [rip + 0x10], 0x10",
);
- try lowerToMiEnc(.mov, RegisterOrMemory.mem(.rbp, -8, .qword_ptr), 0x10, isel.code());
+ try lowerToMiEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{ .disp = -8, .base = .rbp }), 0x10, isel.code());
try expectEqualHexStrings(
"\x48\xc7\x45\xf8\x10\x00\x00\x00",
isel.lowered(),
"mov qword ptr [rbp - 8], 0x10",
);
- try lowerToMiEnc(.mov, RegisterOrMemory.mem(.rbp, -2, .word_ptr), 0x10, isel.code());
+ try lowerToMiEnc(.mov, RegisterOrMemory.mem(.word_ptr, .{ .disp = -2, .base = .rbp }), 0x10, isel.code());
try expectEqualHexStrings("\x66\xC7\x45\xFE\x10\x00", isel.lowered(), "mov word ptr [rbp - 2], 0x10");
- try lowerToMiEnc(.mov, RegisterOrMemory.mem(.rbp, -1, .byte_ptr), 0x10, isel.code());
+ try lowerToMiEnc(.mov, RegisterOrMemory.mem(.byte_ptr, .{ .disp = -1, .base = .rbp }), 0x10, isel.code());
try expectEqualHexStrings("\xC6\x45\xFF\x10", isel.lowered(), "mov byte ptr [rbp - 1], 0x10");
+ try lowerToMiEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = 0x10000000,
+ .scale_index = .{
+ .scale = 1,
+ .index = .rcx,
+ },
+ }), 0x10, isel.code());
+ try expectEqualHexStrings(
+ "\x48\xC7\x04\x4D\x00\x00\x00\x10\x10\x00\x00\x00",
+ isel.lowered(),
+ "mov qword ptr [rcx*2 + 0x10000000], 0x10",
+ );
}
test "lower RM encoding" {
@@ -1975,36 +1844,69 @@ test "lower RM encoding" {
defer isel.deinit();
try lowerToRmEnc(.mov, .rax, RegisterOrMemory.reg(.rbx), isel.code());
try expectEqualHexStrings("\x48\x8b\xc3", isel.lowered(), "mov rax, rbx");
- try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.r11, 0, .qword_ptr), isel.code());
+ try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0, .base = .r11 }), isel.code());
try expectEqualHexStrings("\x49\x8b\x03", isel.lowered(), "mov rax, qword ptr [r11 + 0]");
- try lowerToRmEnc(.add, .r11, RegisterOrMemory.mem(null, 0x10000000, .qword_ptr), isel.code());
+ try lowerToRmEnc(.add, .r11, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0x10000000 }), isel.code());
try expectEqualHexStrings(
"\x4C\x03\x1C\x25\x00\x00\x00\x10",
isel.lowered(),
"add r11, qword ptr [ds:0x10000000]",
);
- try lowerToRmEnc(.add, .r12b, RegisterOrMemory.mem(null, 0x10000000, .byte_ptr), isel.code());
+ try lowerToRmEnc(.add, .r12b, RegisterOrMemory.mem(.byte_ptr, .{ .disp = 0x10000000 }), isel.code());
try expectEqualHexStrings(
"\x44\x02\x24\x25\x00\x00\x00\x10",
isel.lowered(),
"add r11b, byte ptr [ds:0x10000000]",
);
- try lowerToRmEnc(.sub, .r11, RegisterOrMemory.mem(.r13, 0x10000000, .qword_ptr), isel.code());
+ try lowerToRmEnc(.sub, .r11, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = 0x10000000,
+ .base = .r13,
+ }), isel.code());
try expectEqualHexStrings(
"\x4D\x2B\x9D\x00\x00\x00\x10",
isel.lowered(),
"sub r11, qword ptr [r13 + 0x10000000]",
);
- try lowerToRmEnc(.sub, .r11, RegisterOrMemory.mem(.r12, 0x10000000, .qword_ptr), isel.code());
+ try lowerToRmEnc(.sub, .r11, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = 0x10000000,
+ .base = .r12,
+ }), isel.code());
try expectEqualHexStrings(
"\x4D\x2B\x9C\x24\x00\x00\x00\x10",
isel.lowered(),
"sub r11, qword ptr [r12 + 0x10000000]",
);
- try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.rbp, -4, .qword_ptr), isel.code());
+ try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{ .disp = -4, .base = .rbp }), isel.code());
try expectEqualHexStrings("\x48\x8B\x45\xFC", isel.lowered(), "mov rax, qword ptr [rbp - 4]");
- try lowerToRmEnc(.lea, .rax, RegisterOrMemory.rip(0x10, .qword_ptr), isel.code());
+ try lowerToRmEnc(.lea, .rax, RegisterOrMemory.rip(.qword_ptr, 0x10), isel.code());
try expectEqualHexStrings("\x48\x8D\x05\x10\x00\x00\x00", isel.lowered(), "lea rax, [rip + 0x10]");
+ try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = -8,
+ .base = .rbp,
+ .scale_index = .{
+ .scale = 0,
+ .index = .rcx,
+ },
+ }), isel.code());
+ try expectEqualHexStrings("\x48\x8B\x44\x0D\xF8", isel.lowered(), "mov rax, qword ptr [rbp + rcx*1 - 8]");
+ try lowerToRmEnc(.mov, .eax, RegisterOrMemory.mem(.dword_ptr, .{
+ .disp = -4,
+ .base = .rbp,
+ .scale_index = .{
+ .scale = 2,
+ .index = .rdx,
+ },
+ }), isel.code());
+ try expectEqualHexStrings("\x8B\x44\x95\xFC", isel.lowered(), "mov eax, dword ptr [rbp + rdx*4 - 4]");
+ try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = -8,
+ .base = .rbp,
+ .scale_index = .{
+ .scale = 3,
+ .index = .rcx,
+ },
+ }), isel.code());
+ try expectEqualHexStrings("\x48\x8B\x44\xCD\xF8", isel.lowered(), "mov rax, qword ptr [rbp + rcx*8 - 8]");
}
test "lower MR encoding" {
@@ -2012,27 +1914,30 @@ test "lower MR encoding" {
defer isel.deinit();
try lowerToMrEnc(.mov, RegisterOrMemory.reg(.rax), .rbx, isel.code());
try expectEqualHexStrings("\x48\x89\xd8", isel.lowered(), "mov rax, rbx");
- try lowerToMrEnc(.mov, RegisterOrMemory.mem(.rbp, -4, .qword_ptr), .r11, isel.code());
+ try lowerToMrEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{ .disp = -4, .base = .rbp }), .r11, isel.code());
try expectEqualHexStrings("\x4c\x89\x5d\xfc", isel.lowered(), "mov qword ptr [rbp - 4], r11");
- try lowerToMrEnc(.add, RegisterOrMemory.mem(null, 0x10000000, .byte_ptr), .r12b, isel.code());
+ try lowerToMrEnc(.add, RegisterOrMemory.mem(.byte_ptr, .{ .disp = 0x10000000 }), .r12b, isel.code());
try expectEqualHexStrings(
"\x44\x00\x24\x25\x00\x00\x00\x10",
isel.lowered(),
"add byte ptr [ds:0x10000000], r12b",
);
- try lowerToMrEnc(.add, RegisterOrMemory.mem(null, 0x10000000, .dword_ptr), .r12d, isel.code());
+ try lowerToMrEnc(.add, RegisterOrMemory.mem(.dword_ptr, .{ .disp = 0x10000000 }), .r12d, isel.code());
try expectEqualHexStrings(
"\x44\x01\x24\x25\x00\x00\x00\x10",
isel.lowered(),
"add dword ptr [ds:0x10000000], r12d",
);
- try lowerToMrEnc(.sub, RegisterOrMemory.mem(.r11, 0x10000000, .qword_ptr), .r12, isel.code());
+ try lowerToMrEnc(.sub, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = 0x10000000,
+ .base = .r11,
+ }), .r12, isel.code());
try expectEqualHexStrings(
"\x4D\x29\xA3\x00\x00\x00\x10",
isel.lowered(),
"sub qword ptr [r11 + 0x10000000], r12",
);
- try lowerToMrEnc(.mov, RegisterOrMemory.rip(0x10, .qword_ptr), .r12, isel.code());
+ try lowerToMrEnc(.mov, RegisterOrMemory.rip(.qword_ptr, 0x10), .r12, isel.code());
try expectEqualHexStrings("\x4C\x89\x25\x10\x00\x00\x00", isel.lowered(), "mov qword ptr [rip + 0x10], r12");
}
@@ -2083,21 +1988,24 @@ test "lower M encoding" {
try expectEqualHexStrings("\x41\xFF\xE4", isel.lowered(), "jmp r12");
try lowerToMEnc(.jmp_near, RegisterOrMemory.reg(.r12w), isel.code());
try expectEqualHexStrings("\x66\x41\xFF\xE4", isel.lowered(), "jmp r12w");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.r12, 0, .qword_ptr), isel.code());
+ try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0, .base = .r12 }), isel.code());
try expectEqualHexStrings("\x41\xFF\x24\x24", isel.lowered(), "jmp qword ptr [r12]");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.r12, 0, .word_ptr), isel.code());
+ try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.word_ptr, .{ .disp = 0, .base = .r12 }), isel.code());
try expectEqualHexStrings("\x66\x41\xFF\x24\x24", isel.lowered(), "jmp word ptr [r12]");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.r12, 0x10, .qword_ptr), isel.code());
+ try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0x10, .base = .r12 }), isel.code());
try expectEqualHexStrings("\x41\xFF\x64\x24\x10", isel.lowered(), "jmp qword ptr [r12 + 0x10]");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.r12, 0x1000, .qword_ptr), isel.code());
+ try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = 0x1000,
+ .base = .r12,
+ }), isel.code());
try expectEqualHexStrings(
"\x41\xFF\xA4\x24\x00\x10\x00\x00",
isel.lowered(),
"jmp qword ptr [r12 + 0x1000]",
);
- try lowerToMEnc(.jmp_near, RegisterOrMemory.rip(0x10, .qword_ptr), isel.code());
+ try lowerToMEnc(.jmp_near, RegisterOrMemory.rip(.qword_ptr, 0x10), isel.code());
try expectEqualHexStrings("\xFF\x25\x10\x00\x00\x00", isel.lowered(), "jmp qword ptr [rip + 0x10]");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(null, 0x10, .qword_ptr), isel.code());
+ try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0x10 }), isel.code());
try expectEqualHexStrings("\xFF\x24\x25\x10\x00\x00\x00", isel.lowered(), "jmp qword ptr [ds:0x10]");
try lowerToMEnc(.seta, RegisterOrMemory.reg(.r11b), isel.code());
try expectEqualHexStrings("\x41\x0F\x97\xC3", isel.lowered(), "seta r11b");
@@ -2115,15 +2023,24 @@ test "lower O encoding" {
test "lower RMI encoding" {
var isel = TestIsel.init();
defer isel.deinit();
- try lowerToRmiEnc(.imul, .rax, RegisterOrMemory.mem(.rbp, -8, .qword_ptr), 0x10, isel.code());
+ try lowerToRmiEnc(.imul, .rax, RegisterOrMemory.mem(.qword_ptr, .{
+ .disp = -8,
+ .base = .rbp,
+ }), 0x10, isel.code());
try expectEqualHexStrings(
"\x48\x69\x45\xF8\x10\x00\x00\x00",
isel.lowered(),
"imul rax, qword ptr [rbp - 8], 0x10",
);
- try lowerToRmiEnc(.imul, .eax, RegisterOrMemory.mem(.rbp, -4, .dword_ptr), 0x10, isel.code());
+ try lowerToRmiEnc(.imul, .eax, RegisterOrMemory.mem(.dword_ptr, .{
+ .disp = -4,
+ .base = .rbp,
+ }), 0x10, isel.code());
try expectEqualHexStrings("\x69\x45\xFC\x10\x00\x00\x00", isel.lowered(), "imul eax, dword ptr [rbp - 4], 0x10");
- try lowerToRmiEnc(.imul, .ax, RegisterOrMemory.mem(.rbp, -2, .word_ptr), 0x10, isel.code());
+ try lowerToRmiEnc(.imul, .ax, RegisterOrMemory.mem(.word_ptr, .{
+ .disp = -2,
+ .base = .rbp,
+ }), 0x10, isel.code());
try expectEqualHexStrings("\x66\x69\x45\xFE\x10\x00", isel.lowered(), "imul ax, word ptr [rbp - 2], 0x10");
try lowerToRmiEnc(.imul, .r12, RegisterOrMemory.reg(.r12), 0x10, isel.code());
try expectEqualHexStrings("\x4D\x69\xE4\x10\x00\x00\x00", isel.lowered(), "imul r12, r12, 0x10");
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index d933181a6e..828addabfa 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -167,6 +167,7 @@ pub const Inst = struct {
/// 0b00 reg1, [ds:imm32]
/// 0b01 reg1, [rip + imm32]
/// 0b10 reg1, [rip + reloc]
+ /// 0b11 reg1, [reg2 + rcx + imm32]
/// Notes:
/// * if flags are 0b10, `Data` contains `got_entry` for the linker to generate
/// a valid relocation for.
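
The new 0b11 variant documented above matches the mirLea branch added in Isel.zig: an RM-encoded lea with rcx hard-wired as the index register at scale 0, i.e. lea reg1, [reg2 + rcx*1 + imm32]. CodeGen.zig emits it in airSliceElemVal; a sketch of the emission, with placeholder names for the destination register and displacement:

    _ = try self.addInst(.{
        .tag = .lea,
        .ops = (Mir.Ops{
            .reg1 = dst_reg, // placeholder
            .reg2 = .rbp,
            .flags = 0b11,
        }).encode(),
        .data = .{ .imm = disp }, // placeholder
    });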