diff options
| author | Jacob Young <jacobly0@users.noreply.github.com> | 2023-10-28 20:06:32 -0400 |
|---|---|---|
| committer | Jacob Young <jacobly0@users.noreply.github.com> | 2023-10-29 05:17:43 -0400 |
| commit | 1fecf86ebfa352927e2e3aedfd16b0755482e96e (patch) | |
| tree | dfc2eab0453bcad4cf2a9f80acecaddbbb3ce3fb /src/arch/x86_64/CodeGen.zig | |
| parent | 20ade4ce7f4e738344dbf370d5df769d2a979908 (diff) | |
| download | zig-1fecf86ebfa352927e2e3aedfd16b0755482e96e.tar.gz zig-1fecf86ebfa352927e2e3aedfd16b0755482e96e.zip | |
x86_64: refactor codegen memory repr
Also refactor linker reloc lowering.
Diffstat (limited to 'src/arch/x86_64/CodeGen.zig')
| -rw-r--r-- | src/arch/x86_64/CodeGen.zig | 1400 |
1 file changed, 776 insertions(+), 624 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 290c585095..11fb9dbdb7 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -115,6 +115,7 @@ const mir_to_air_map_init = if (builtin.mode == .Debug) std.AutoHashMapUnmanaged const FrameAddr = struct { index: FrameIndex, off: i32 = 0 }; const RegisterOffset = struct { reg: Register, off: i32 = 0 }; +const SymbolOffset = struct { sym: u32, off: i32 = 0 }; const Owner = union(enum) { func_index: InternPool.Index, @@ -195,9 +196,9 @@ pub const MCValue = union(enum) { memory: u64, /// The value is in memory at an address not-yet-allocated by the linker. /// This traditionally corresponds to a relocation emitted in a relocatable object file. - load_symbol: u32, + load_symbol: SymbolOffset, /// The address of the memory location not-yet-allocated by the linker. - lea_symbol: u32, + lea_symbol: SymbolOffset, /// The value is in memory at a constant offset from the address in a register. indirect: RegisterOffset, /// The value is in memory. 
@@ -315,7 +316,7 @@ pub const MCValue = union(enum) { .load_got => |sym_index| .{ .lea_got = sym_index }, .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, .load_frame => |frame_addr| .{ .lea_frame = frame_addr }, - .load_symbol => |sym_index| .{ .lea_symbol = sym_index }, + .load_symbol => |sym_off| .{ .lea_symbol = sym_off }, }; } @@ -387,7 +388,7 @@ pub const MCValue = union(enum) { }; } - fn mem(mcv: MCValue, ptr_size: Memory.PtrSize) Memory { + fn mem(mcv: MCValue, size: Memory.Size) Memory { return switch (mcv) { .none, .unreach, @@ -411,18 +412,27 @@ pub const MCValue = union(enum) { .load_symbol, .lea_symbol, => unreachable, - .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| - Memory.sib(ptr_size, .{ .base = .{ .reg = .ds }, .disp = small_addr }) - else - Memory.moffs(.ds, addr), - .indirect => |reg_off| Memory.sib(ptr_size, .{ + .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| .{ + .base = .{ .reg = .ds }, + .mod = .{ .rm = .{ + .size = size, + .disp = small_addr, + } }, + } else .{ .base = .{ .reg = .ds }, .mod = .{ .off = addr } }, + .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, - .disp = reg_off.off, - }), - .load_frame => |frame_addr| Memory.sib(ptr_size, .{ + .mod = .{ .rm = .{ + .size = size, + .disp = reg_off.off, + } }, + }, + .load_frame => |frame_addr| .{ .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, - }), + .mod = .{ .rm = .{ + .size = size, + .disp = frame_addr.off, + } }, + }, }; } @@ -442,6 +452,8 @@ pub const MCValue = union(enum) { .register_overflow => |pl| try writer.print("{s}:{s}", .{ @tagName(pl.eflags), @tagName(pl.reg), }), + .load_symbol => |pl| try writer.print("[{} + 0x{x}]", .{ pl.sym, pl.off }), + .lea_symbol => |pl| try writer.print("{} + 0x{x}", .{ pl.sym, pl.off }), .indirect => |pl| try writer.print("[{s} + 0x{x}]", .{ @tagName(pl.reg), pl.off }), .load_direct => |pl| try writer.print("[direct:{d}]", .{pl}), .lea_direct => |pl| try 
writer.print("direct:{d}", .{pl}), @@ -453,8 +465,6 @@ pub const MCValue = union(enum) { .lea_frame => |pl| try writer.print("{} + 0x{x}", .{ pl.index, pl.off }), .reserved_frame => |pl| try writer.print("(dead:{})", .{pl}), .air_ref => |pl| try writer.print("(air:0x{x})", .{@intFromEnum(pl)}), - .load_symbol => |pl| try writer.print("[symbol:{d}]", .{pl}), - .lea_symbol => |pl| try writer.print("symbol:{d}", .{pl}), } } }; @@ -888,7 +898,6 @@ pub fn generate( .cc = cc, .src_loc = src_loc, }, - .bin_file = bin_file, .debug_output = debug_output, .code = code, .prev_di_pc = 0, @@ -976,7 +985,6 @@ pub fn generateLazy( .cc = abi.resolveCallingConvention(.Unspecified, function.target.*), .src_loc = src_loc, }, - .bin_file = bin_file, .debug_output = debug_output, .code = code, .prev_di_pc = undefined, // no debug info yet @@ -1139,7 +1147,7 @@ fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { inline for (fields) |field| { self.mir_extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), - i32 => @bitCast(@field(extra, field.name)), + i32, Mir.Memory.Info => @bitCast(@field(extra, field.name)), else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)), }); } @@ -1178,17 +1186,9 @@ fn asmCmovccRegisterMemory(self: *Self, cc: Condition, reg: Register, m: Memory) .nz_or_p => .pseudo, }, .ops = switch (cc) { - else => switch (m) { - .sib => .rm_sib, - .rip => .rm_rip, - else => unreachable, - }, + else => .rm, .z_and_np => unreachable, - .nz_or_p => switch (m) { - .sib => .pseudo_cmov_nz_or_p_rm_sib, - .rip => .pseudo_cmov_nz_or_p_rm_rip, - else => unreachable, - }, + .nz_or_p => .pseudo_cmov_nz_or_p_rm, }, .data = .{ .rx = .{ .fixes = switch (cc) { @@ -1197,11 +1197,7 @@ fn asmCmovccRegisterMemory(self: *Self, cc: Condition, reg: Register, m: Memory) .nz_or_p => ._, }, .r1 = reg, - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try 
self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + .payload = try self.addExtra(Mir.Memory.encode(m)), } }, }); } @@ -1231,32 +1227,16 @@ fn asmSetccRegister(self: *Self, cc: Condition, reg: Register) !void { } fn asmSetccMemory(self: *Self, cc: Condition, m: Memory) !void { - const payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }; + const payload = try self.addExtra(Mir.Memory.encode(m)); _ = try self.addInst(.{ .tag = switch (cc) { else => .set, .z_and_np, .nz_or_p => .pseudo, }, .ops = switch (cc) { - else => switch (m) { - .sib => .m_sib, - .rip => .m_rip, - else => unreachable, - }, - .z_and_np => switch (m) { - .sib => .pseudo_set_z_and_np_m_sib, - .rip => .pseudo_set_z_and_np_m_rip, - else => unreachable, - }, - .nz_or_p => switch (m) { - .sib => .pseudo_set_nz_or_p_m_sib, - .rip => .pseudo_set_nz_or_p_m_rip, - else => unreachable, - }, + else => .m, + .z_and_np => .pseudo_set_z_and_np_m, + .nz_or_p => .pseudo_set_nz_or_p_m, }, .data = switch (cc) { else => .{ .x = .{ @@ -1504,20 +1484,12 @@ fn asmRegisterRegisterMemory( ) !void { _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => .rrm_sib, - .rip => .rrm_rip, - else => unreachable, - }, + .ops = .rrm, .data = .{ .rrx = .{ .fixes = tag[0], .r1 = reg1, .r2 = reg2, - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + .payload = try self.addExtra(Mir.Memory.encode(m)), } }, }); } @@ -1525,18 +1497,10 @@ fn asmRegisterRegisterMemory( fn asmMemory(self: *Self, tag: Mir.Inst.FixedTag, m: Memory) !void { _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => .m_sib, - .rip => .m_rip, - else => unreachable, - }, + .ops = .m, .data = .{ .x = .{ .fixes = tag[0], - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip 
=> try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + .payload = try self.addExtra(Mir.Memory.encode(m)), } }, }); } @@ -1544,19 +1508,11 @@ fn asmMemory(self: *Self, tag: Mir.Inst.FixedTag, m: Memory) !void { fn asmRegisterMemory(self: *Self, tag: Mir.Inst.FixedTag, reg: Register, m: Memory) !void { _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => .rm_sib, - .rip => .rm_rip, - else => unreachable, - }, + .ops = .rm, .data = .{ .rx = .{ .fixes = tag[0], .r1 = reg, - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + .payload = try self.addExtra(Mir.Memory.encode(m)), } }, }); } @@ -1574,20 +1530,12 @@ fn asmRegisterMemoryImmediate( }) |small_imm| { _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => .rmi_sib, - .rip => .rmi_rip, - else => unreachable, - }, + .ops = .rmi, .data = .{ .rix = .{ .fixes = tag[0], .r1 = reg, .i = small_imm, - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + .payload = try self.addExtra(Mir.Memory.encode(m)), } }, }); } else { @@ -1595,23 +1543,12 @@ fn asmRegisterMemoryImmediate( .signed => |s| @bitCast(s), .unsigned => unreachable, } }); - assert(payload + 1 == switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }); + assert(payload + 1 == try self.addExtra(Mir.Memory.encode(m))); _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => switch (imm) { - .signed => .rmi_sib_s, - .unsigned => .rmi_sib_u, - }, - .rip => switch (imm) { - .signed => .rmi_rip_s, - .unsigned => .rmi_rip_u, - }, - else => unreachable, + .ops = switch (imm) { + .signed => .rmi_s, + .unsigned => .rmi_u, }, .data = .{ .rx = .{ .fixes = tag[0], @@ -1632,21 +1569,13 @@ fn 
asmRegisterRegisterMemoryImmediate( ) !void { _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => .rrmi_sib, - .rip => .rrmi_rip, - else => unreachable, - }, + .ops = .rrmi, .data = .{ .rrix = .{ .fixes = tag[0], .r1 = reg1, .r2 = reg2, .i = @intCast(imm.unsigned), - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + .payload = try self.addExtra(Mir.Memory.encode(m)), } }, }); } @@ -1654,19 +1583,11 @@ fn asmRegisterRegisterMemoryImmediate( fn asmMemoryRegister(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, reg: Register) !void { _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => .mr_sib, - .rip => .mr_rip, - else => unreachable, - }, + .ops = .mr, .data = .{ .rx = .{ .fixes = tag[0], .r1 = reg, - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + .payload = try self.addExtra(Mir.Memory.encode(m)), } }, }); } @@ -1676,23 +1597,12 @@ fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immed .signed => |s| @bitCast(s), .unsigned => |u| @intCast(u), } }); - assert(payload + 1 == switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }); + assert(payload + 1 == try self.addExtra(Mir.Memory.encode(m))); _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => switch (imm) { - .signed => .mi_sib_s, - .unsigned => .mi_sib_u, - }, - .rip => switch (imm) { - .signed => .mi_rip_s, - .unsigned => .mi_rip_u, - }, - else => unreachable, + .ops = switch (imm) { + .signed => .mi_s, + .unsigned => .mi_u, }, .data = .{ .x = .{ .fixes = tag[0], @@ -1710,20 +1620,12 @@ fn asmMemoryRegisterRegister( ) !void { _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => .mrr_sib, - .rip => 
.mrr_rip, - else => unreachable, - }, + .ops = .mrr, .data = .{ .rrx = .{ .fixes = tag[0], .r1 = reg1, .r2 = reg2, - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + .payload = try self.addExtra(Mir.Memory.encode(m)), } }, }); } @@ -1737,20 +1639,12 @@ fn asmMemoryRegisterImmediate( ) !void { _ = try self.addInst(.{ .tag = tag[1], - .ops = switch (m) { - .sib => .mri_sib, - .rip => .mri_rip, - else => unreachable, - }, + .ops = .mri, .data = .{ .rix = .{ .fixes = tag[0], .r1 = reg, .i = @intCast(imm.unsigned), - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + .payload = try self.addExtra(Mir.Memory.encode(m)), } }, }); } @@ -1916,13 +1810,16 @@ fn gen(self: *Self) InnerError!void { if (need_frame_align or need_stack_adjust) { self.mir_instructions.set(backpatch_stack_dealloc, .{ .tag = .lea, - .ops = .rm_sib, + .ops = .rm, .data = .{ .rx = .{ .r1 = .rsp, - .payload = try self.addExtra(Mir.MemorySib.encode(Memory.sib(.qword, .{ + .payload = try self.addExtra(Mir.Memory.encode(.{ .base = .{ .reg = .rbp }, - .disp = -frame_layout.save_reg_list.size(), - }))), + .mod = .{ .rm = .{ + .size = .qword, + .disp = -frame_layout.save_reg_list.size(), + } }, + })), } }, }); } @@ -3098,14 +2995,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { .{ .vp_, .@"and" }, dst_reg, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), + splat_addr_mcv.deref().mem(Memory.Size.fromSize(splat_abi_size)), ); try self.asmRegisterRegisterRegister(mir_tag, dst_reg, dst_reg, dst_reg); } else { try self.asmRegisterMemory( .{ .p_, .@"and" }, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), + splat_addr_mcv.deref().mem(Memory.Size.fromSize(splat_abi_size)), ); try self.asmRegisterRegister(mir_tag, dst_reg, 
dst_reg); } @@ -3254,7 +3151,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod)); try self.asmMemoryImmediate( .{ ._, .mov }, - Memory.sib(.qword, .{ .base = .{ .frame = frame_index } }), + .{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } }, Immediate.u(0), ); @@ -3338,9 +3235,10 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { call_mcv.register_pair[0], call_mcv.register_pair[1], ); - try self.asmSetccMemory(.nz, Memory.sib(.byte, .{ + try self.asmSetccMemory(.nz, .{ .base = .{ .frame = signed_div_floor_state.frame_index }, - })); + .mod = .{ .rm = .{ .size = .byte } }, + }); try self.performReloc(signed_div_floor_state.reloc); const dst_mcv = try self.genCall( .{ .lib = .{ @@ -3356,9 +3254,10 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterMemory( .{ ._, .sub }, dst_mcv.register_pair[0], - Memory.sib(.qword, .{ + .{ .base = .{ .frame = signed_div_floor_state.frame_index }, - }), + .mod = .{ .rm = .{ .size = .qword } }, + }, ); try self.asmRegisterImmediate( .{ ._, .sbb }, @@ -3891,7 +3790,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { ); try self.asmMemoryImmediate( .{ ._, .cmp }, - overflow.mem(self.memPtrSize(Type.c_int)), + overflow.mem(self.memSize(Type.c_int)), Immediate.s(0), ); try self.genSetMem( @@ -4123,7 +4022,7 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue .register => |reg| try self.asmRegister(tag, registerAlias(reg, abi_size)), .memory, .indirect, .load_frame => try self.asmMemory( tag, - mat_rhs.mem(Memory.PtrSize.fromSize(abi_size)), + mat_rhs.mem(Memory.Size.fromSize(abi_size)), ), else => unreachable, } @@ -4360,10 +4259,13 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterMemory( .{ ._, .mov }, registerAlias(dst_reg, err_abi_size), - 
Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ + .{ .base = .{ .reg = src_reg }, - .disp = err_off, - }), + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(err_abi_size), + .disp = err_off, + } }, + }, ); return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); @@ -4398,10 +4300,13 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const err_abi_size: u32 = @intCast(err_ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .mov }, - Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ + .{ .base = .{ .reg = src_reg }, - .disp = err_off, - }), + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(err_abi_size), + .disp = err_off, + } }, + }, Immediate.u(0), ); @@ -4420,7 +4325,10 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), - Memory.sib(.qword, .{ .base = .{ .reg = src_reg }, .disp = pl_off }), + .{ + .base = .{ .reg = src_reg }, + .mod = .{ .rm = .{ .size = .qword, .disp = pl_off } }, + }, ); break :result .{ .register = dst_reg }; }; @@ -4542,10 +4450,13 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { .load_frame => |frame_addr| try self.asmMemoryImmediate( .{ ._, .mov }, - Memory.sib(.byte, .{ + .{ .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off + pl_abi_size, - }), + .mod = .{ .rm = .{ + .size = .byte, + .disp = frame_addr.off + pl_abi_size, + } }, + }, Immediate.u(1), ), } @@ -4658,10 +4569,10 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), - Memory.sib(.qword, .{ + .{ .base = .{ .reg = src_reg }, - .disp = @divExact(self.target.ptrBitWidth(), 8), - }), + .mod = .{ .rm = .{ .size = .qword, .disp = 8 } }, + }, ); return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); @@ -4791,13 +4702,16 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { try 
self.asmRegisterMemory( .{ ._, .lea }, addr_reg, - Memory.sib(.qword, .{ .base = .{ .frame = frame_index } }), + .{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } }, ); }, .load_frame => |frame_addr| try self.asmRegisterMemory( .{ ._, .lea }, addr_reg, - Memory.sib(.qword, .{ .base = .{ .frame = frame_addr.index }, .disp = frame_addr.off }), + .{ + .base = .{ .frame = frame_addr.index }, + .mod = .{ .rm = .{ .size = .qword, .disp = frame_addr.off } }, + }, ), .memory, .load_symbol, @@ -5610,10 +5524,14 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterMemory( .{ ._, .lea }, if (limb_abi_size > 4) tmp.to64() else tmp.to32(), - Memory.sib(.qword, .{ + .{ .base = .{ .reg = dst.to64() }, - .scale_index = .{ .index = tmp.to64(), .scale = 1 << 2 }, - }), + .mod = .{ .rm = .{ + .size = .qword, + .index = tmp.to64(), + .scale = .@"4", + } }, + }, ); // tmp = temp3 = ((temp2 >> 2) & 0x33...33) + ((temp2 & 0x33...33) << 2) try self.asmRegisterRegister(.{ ._, .mov }, dst, tmp); @@ -5633,10 +5551,14 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterMemory( .{ ._, .lea }, if (limb_abi_size > 4) dst.to64() else dst.to32(), - Memory.sib(.qword, .{ + .{ .base = .{ .reg = tmp.to64() }, - .scale_index = .{ .index = dst.to64(), .scale = 1 << 1 }, - }), + .mod = .{ .rm = .{ + .size = .qword, + .index = dst.to64(), + .scale = .@"2", + } }, + }, ); // dst = ((temp3 >> 1) & 0x55...55) + ((temp3 & 0x55...55) << 1) } @@ -5713,12 +5635,13 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type) .abs => try vec_ty.maxInt(mod, vec_ty), else => unreachable, } }); - const sign_mem = if (sign_mcv.isMemory()) - sign_mcv.mem(Memory.PtrSize.fromSize(abi_size)) + const sign_mem: Memory = if (sign_mcv.isMemory()) + sign_mcv.mem(Memory.Size.fromSize(abi_size)) else - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ + .{ .base = .{ .reg = try self.copyToTmpRegister(Type.usize, 
sign_mcv.address()) }, - }); + .mod = .{ .rm = .{ .size = Memory.Size.fromSize(abi_size) } }, + }; if (self.hasFeature(.avx)) try self.asmRegisterRegisterMemory( switch (scalar_bits) { @@ -5896,7 +5819,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: Ro mir_tag, dst_alias, dst_alias, - src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + src_mcv.mem(Memory.Size.fromSize(abi_size)), Immediate.u(@as(u5, @bitCast(mode))), ) else try self.asmRegisterRegisterRegisterImmediate( mir_tag, @@ -5911,7 +5834,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: Ro else => if (src_mcv.isMemory()) try self.asmRegisterMemoryImmediate( mir_tag, dst_alias, - src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + src_mcv.mem(Memory.Size.fromSize(abi_size)), Immediate.u(@as(u5, @bitCast(mode))), ) else try self.asmRegisterRegisterImmediate( mir_tag, @@ -5951,7 +5874,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { .memory, .indirect, .load_frame => try self.asmCmovccRegisterMemory( .l, registerAlias(dst_mcv.register, cmov_abi_size), - src_mcv.mem(Memory.PtrSize.fromSize(cmov_abi_size)), + src_mcv.mem(Memory.Size.fromSize(cmov_abi_size)), ), else => { const val_reg = try self.copyToTmpRegister(ty, src_mcv); @@ -6051,7 +5974,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void { if (src_mcv.isMemory()) try self.asmRegisterMemory( mir_tag, dst_alias, - src_mcv.mem(self.memPtrSize(ty)), + src_mcv.mem(self.memSize(ty)), ) else try self.asmRegisterRegister( mir_tag, dst_alias, @@ -6157,7 +6080,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { if (src_mcv.isMemory()) try self.asmRegisterMemory( .{ .v_ps, .cvtph2 }, wide_reg, - src_mcv.mem(Memory.PtrSize.fromSize( + src_mcv.mem(Memory.Size.fromSize( @intCast(@divExact(wide_reg.bitSize(), 16)), )), ) else try self.asmRegisterRegister( @@ -6205,7 +6128,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { mir_tag, dst_reg, dst_reg, - 
src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + src_mcv.mem(Memory.Size.fromSize(abi_size)), ) else try self.asmRegisterRegisterRegister( mir_tag, dst_reg, @@ -6218,7 +6141,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { else => if (src_mcv.isMemory()) try self.asmRegisterMemory( mir_tag, dst_reg, - src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + src_mcv.mem(Memory.Size.fromSize(abi_size)), ) else try self.asmRegisterRegister( mir_tag, dst_reg, @@ -6337,14 +6260,13 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn if (val_bit_off < val_extra_bits) val_abi_size else val_abi_size * 2; if (load_abi_size <= 8) { const load_reg = registerAlias(dst_reg, load_abi_size); - try self.asmRegisterMemory( - .{ ._, .mov }, - load_reg, - Memory.sib(Memory.PtrSize.fromSize(load_abi_size), .{ - .base = .{ .reg = ptr_reg }, + try self.asmRegisterMemory(.{ ._, .mov }, load_reg, .{ + .base = .{ .reg = ptr_reg }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(load_abi_size), .disp = val_byte_off, - }), - ); + } }, + }); try self.asmRegisterImmediate(.{ ._r, .sh }, load_reg, Immediate.u(val_bit_off)); } else { const tmp_reg = @@ -6353,22 +6275,20 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn defer self.register_manager.unlockReg(tmp_lock); const dst_alias = registerAlias(dst_reg, val_abi_size); - try self.asmRegisterMemory( - .{ ._, .mov }, - dst_alias, - Memory.sib(Memory.PtrSize.fromSize(val_abi_size), .{ - .base = .{ .reg = ptr_reg }, + try self.asmRegisterMemory(.{ ._, .mov }, dst_alias, .{ + .base = .{ .reg = ptr_reg }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(val_abi_size), .disp = val_byte_off, - }), - ); - try self.asmRegisterMemory( - .{ ._, .mov }, - tmp_reg, - Memory.sib(Memory.PtrSize.fromSize(val_abi_size), .{ - .base = .{ .reg = ptr_reg }, + } }, + }); + try self.asmRegisterMemory(.{ ._, .mov }, tmp_reg, .{ + .base = .{ .reg = ptr_reg }, + .mod = .{ .rm = .{ + .size = 
Memory.Size.fromSize(val_abi_size), .disp = val_byte_off + 1, - }), - ); + } }, + }); try self.asmRegisterRegisterImmediate( .{ ._rd, .sh }, dst_alias, @@ -6480,10 +6400,13 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In const part_bit_off = if (limb_i == 0) src_bit_off else 0; const part_bit_size = @min(src_bit_off + src_bit_size - limb_i * limb_abi_bits, limb_abi_bits) - part_bit_off; - const limb_mem = Memory.sib(Memory.PtrSize.fromSize(limb_abi_size), .{ + const limb_mem: Memory = .{ .base = .{ .reg = ptr_reg }, - .disp = src_byte_off + limb_i * limb_abi_bits, - }); + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(limb_abi_size), + .disp = src_byte_off + limb_i * limb_abi_bits, + } }, + }; const part_mask = (@as(u64, math.maxInt(u64)) >> @intCast(64 - part_bit_size)) << @intCast(part_bit_off); @@ -6814,7 +6737,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const limb_abi_size: u32 = @min(field_abi_size, 8); + const limb_abi_size: u31 = @min(field_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; const field_byte_off: i32 = @intCast(field_off / limb_abi_bits * limb_abi_size); const field_bit_off = field_off % limb_abi_bits; @@ -6832,14 +6755,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { if (field_bit_off < field_extra_bits) field_abi_size else field_abi_size * 2; if (load_abi_size <= 8) { const load_reg = registerAlias(dst_reg, load_abi_size); - try self.asmRegisterMemory( - .{ ._, .mov }, - load_reg, - Memory.sib(Memory.PtrSize.fromSize(load_abi_size), .{ - .base = .{ .frame = frame_addr.index }, + try self.asmRegisterMemory(.{ ._, .mov }, load_reg, .{ + .base = .{ .frame = frame_addr.index }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(load_abi_size), .disp = frame_addr.off + field_byte_off, - }), - ); + } }, + }); try self.asmRegisterImmediate(.{ ._r, .sh }, load_reg, Immediate.u(field_bit_off)); } else { const tmp_reg = registerAlias( 
@@ -6853,19 +6775,21 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterMemory( .{ ._, .mov }, dst_alias, - Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{ + .{ .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off + field_byte_off, - }), - ); - try self.asmRegisterMemory( - .{ ._, .mov }, - tmp_reg, - Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off + field_byte_off + @as(i32, @intCast(limb_abi_size)), - }), + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(field_abi_size), + .disp = frame_addr.off + field_byte_off, + } }, + }, ); + try self.asmRegisterMemory(.{ ._, .mov }, tmp_reg, .{ + .base = .{ .frame = frame_addr.index }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(field_abi_size), + .disp = frame_addr.off + field_byte_off + limb_abi_size, + } }, + }); try self.asmRegisterRegisterImmediate( .{ ._rd, .sh }, dst_alias, @@ -6998,14 +6922,13 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MC defer self.register_manager.unlockReg(addr_reg_lock); try self.genSetReg(addr_reg, Type.usize, dst_mcv.address()); - try self.asmMemory( - mir_tag, - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = addr_reg } }), - ); + try self.asmMemory(mir_tag, .{ .base = .{ .reg = addr_reg }, .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(abi_size), + } } }); }, .indirect, .load_frame => try self.asmMemory( mir_tag, - dst_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + dst_mcv.mem(Memory.Size.fromSize(abi_size)), ), } } @@ -7053,25 +6976,34 @@ fn genShiftBinOpMir( }), }, .memory, .indirect, .load_frame => { - const lhs_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (lhs_mcv) { + const lhs_mem: Memory = switch (lhs_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse - return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{ - 
@tagName(lhs_mcv), - @tagName(rhs_mcv), - }), + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(abi_size), + .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse + return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{ + @tagName(lhs_mcv), + @tagName(rhs_mcv), + }), + } }, }, .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, - .disp = reg_off.off, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(abi_size), + .disp = reg_off.off, + } }, }, .load_frame => |frame_addr| .{ .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(abi_size), + .disp = frame_addr.off, + } }, }, else => unreachable, - }); + }; switch (rhs_mcv) { .immediate => |rhs_imm| try self.asmMemoryImmediate( tag, @@ -7187,26 +7119,35 @@ fn genShiftBinOpMir( try self.asmRegisterMemory( .{ ._, .mov }, tmp_reg, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[0] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[0] * 8, + } }, + }, ); try self.asmMemoryRegisterImmediate( info.double_tag, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[1] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[1] * 8, + } }, + }, tmp_reg, Immediate.u(rhs_imm), ); try self.asmMemoryImmediate( tag, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[0] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[0] * 8, + } }, + }, Immediate.u(rhs_imm), ); } else { @@ -7214,10 +7155,13 @@ fn genShiftBinOpMir( try self.asmRegisterMemory( .{ ._, .mov }, tmp_reg, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[0] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, 
+ .disp = dst_frame_addr.off + info.indices[0] * 8, + } }, + }, ); if (rhs_imm > 64) try self.asmRegisterImmediate( tag, @@ -7226,27 +7170,36 @@ fn genShiftBinOpMir( ); try self.asmMemoryRegister( .{ ._, .mov }, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[1] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[1] * 8, + } }, + }, tmp_reg, ); if (tag[0] == ._r and tag[1] == .sa) try self.asmMemoryImmediate( tag, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[0] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[0] * 8, + } }, + }, Immediate.u(63), ) else { try self.asmRegisterRegister(.{ ._, .xor }, tmp_reg.to32(), tmp_reg.to32()); try self.asmMemoryRegister( .{ ._, .mov }, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[0] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[0] * 8, + } }, + }, tmp_reg, ); } @@ -7265,18 +7218,24 @@ fn genShiftBinOpMir( try self.asmRegisterMemory( .{ ._, .mov }, first_reg, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[0] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[0] * 8, + } }, + }, ); try self.asmRegisterMemory( .{ ._, .mov }, second_reg, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[1] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[1] * 8, + } }, + }, ); if (tag[0] == ._r and tag[1] == .sa) { try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, first_reg); @@ -7302,18 +7261,24 @@ fn genShiftBinOpMir( try self.asmCmovccRegisterRegister(.ae, first_reg, tmp_reg); try 
self.asmMemoryRegister( .{ ._, .mov }, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[1] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[1] * 8, + } }, + }, second_reg, ); try self.asmMemoryRegister( .{ ._, .mov }, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.indices[0] * 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_frame_addr.off + info.indices[0] * 8, + } }, + }, first_reg, ); }, @@ -7541,22 +7506,20 @@ fn genMulDivBinOp( }, dst_abi_size) }; const dst_mcv = try self.allocRegOrMemAdvanced(dst_ty, maybe_inst, false); - try self.asmMemoryRegister( - .{ ._, .mov }, - Memory.sib(.qword, .{ - .base = .{ .frame = dst_mcv.load_frame.index }, + try self.asmMemoryRegister(.{ ._, .mov }, .{ + .base = .{ .frame = dst_mcv.load_frame.index }, + .mod = .{ .rm = .{ + .size = .qword, .disp = dst_mcv.load_frame.off, - }), - .rax, - ); - try self.asmMemoryRegister( - .{ ._, .mov }, - Memory.sib(.qword, .{ - .base = .{ .frame = dst_mcv.load_frame.index }, + } }, + }, .rax); + try self.asmMemoryRegister(.{ ._, .mov }, .{ + .base = .{ .frame = dst_mcv.load_frame.index }, + .mod = .{ .rm = .{ + .size = .qword, .disp = dst_mcv.load_frame.off + 8, - }), - .rdx, - ); + } }, + }, .rdx); return dst_mcv; }, @@ -7768,7 +7731,7 @@ fn genBinOp( mir_tag, dst_reg, dst_reg, - src_mcv.mem(Memory.PtrSize.fromBitSize(float_bits)), + src_mcv.mem(Memory.Size.fromBitSize(float_bits)), ) else try self.asmRegisterRegisterRegister( mir_tag, dst_reg, @@ -7787,7 +7750,7 @@ fn genBinOp( if (src_mcv.isMemory()) try self.asmRegisterMemory( mir_tag, dst_reg, - src_mcv.mem(Memory.PtrSize.fromBitSize(float_bits)), + src_mcv.mem(Memory.Size.fromBitSize(float_bits)), ) else try self.asmRegisterRegister( mir_tag, dst_reg, @@ -8149,21 +8112,30 @@ fn genBinOp( .memory, .indirect, .load_frame => try 
self.asmCmovccRegisterMemory( cc, registerAlias(tmp_reg, cmov_abi_size), - Memory.sib(Memory.PtrSize.fromSize(cmov_abi_size), switch (mat_src_mcv) { + switch (mat_src_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = @intCast(@as(i64, @bitCast(addr))), + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(cmov_abi_size), + .disp = @intCast(@as(i64, @bitCast(addr))), + } }, }, .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, - .disp = reg_off.off, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(cmov_abi_size), + .disp = reg_off.off, + } }, }, .load_frame => |frame_addr| .{ .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(cmov_abi_size), + .disp = frame_addr.off, + } }, }, else => unreachable, - }), + }, ), } try self.genCopy(lhs_ty, dst_mcv, .{ .register = tmp_reg }); @@ -8912,8 +8884,8 @@ fn genBinOp( dst_reg, lhs_reg, src_mcv.mem(switch (lhs_ty.zigTypeTag(mod)) { - else => Memory.PtrSize.fromSize(abi_size), - .Vector => Memory.PtrSize.fromBitSize(dst_reg.bitSize()), + else => Memory.Size.fromSize(abi_size), + .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()), }), ) else try self.asmRegisterRegisterRegister( mir_tag, @@ -8930,8 +8902,8 @@ fn genBinOp( mir_tag, dst_reg, src_mcv.mem(switch (lhs_ty.zigTypeTag(mod)) { - else => Memory.PtrSize.fromSize(abi_size), - .Vector => Memory.PtrSize.fromBitSize(dst_reg.bitSize()), + else => Memory.Size.fromSize(abi_size), + .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()), }), ) else try self.asmRegisterRegister( mir_tag, @@ -9202,12 +9174,12 @@ fn genBinOp( .ty = lhs_ty, .val = try unsigned_ty.maxInt(mod, unsigned_ty), }); - const not_mem = if (not_mcv.isMemory()) - not_mcv.mem(Memory.PtrSize.fromSize(abi_size)) + const not_mem: Memory = if (not_mcv.isMemory()) + not_mcv.mem(Memory.Size.fromSize(abi_size)) else - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ + .{ .base = .{ .reg = try 
self.copyToTmpRegister(Type.usize, not_mcv.address()), - } }); + }, .mod = .{ .rm = .{ .size = Memory.Size.fromSize(abi_size) } } }; switch (mir_tag[0]) { .vp_b, .vp_d, .vp_q, .vp_w => try self.asmRegisterRegisterMemory( .{ .vp_, .xor }, @@ -9359,25 +9331,30 @@ fn genBinOpMir( .lea_frame, => { blk: { - return self.asmRegisterMemory( - mir_limb_tag, - dst_alias, - Memory.sib(Memory.PtrSize.fromSize(limb_abi_size), switch (src_mcv) { - .memory => |addr| .{ - .base = .{ .reg = .ds }, + return self.asmRegisterMemory(mir_limb_tag, dst_alias, switch (src_mcv) { + .memory => |addr| .{ + .base = .{ .reg = .ds }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(limb_abi_size), .disp = math.cast(i32, addr + off) orelse break :blk, - }, - .indirect => |reg_off| .{ - .base = .{ .reg = reg_off.reg }, + } }, + }, + .indirect => |reg_off| .{ + .base = .{ .reg = reg_off.reg }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(limb_abi_size), .disp = reg_off.off + off, - }, - .load_frame => |frame_addr| .{ - .base = .{ .frame = frame_addr.index }, + } }, + }, + .load_frame => |frame_addr| .{ + .base = .{ .frame = frame_addr.index }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(limb_abi_size), .disp = frame_addr.off + off, - }, - else => break :blk, - }), - ); + } }, + }, + else => break :blk, + }); } switch (src_mcv) { @@ -9510,26 +9487,35 @@ fn genBinOpMir( }), }, }; - const dst_limb_mem = Memory.sib( - Memory.PtrSize.fromSize(limb_abi_size), - switch (dst_mcv) { - .memory, - .load_symbol, - .load_got, - .load_direct, - .load_tlv, - => .{ .base = .{ .reg = dst_info.?.addr_reg }, .disp = off }, - .indirect => |reg_off| .{ - .base = .{ .reg = reg_off.reg }, + const dst_limb_mem: Memory = switch (dst_mcv) { + .memory, + .load_symbol, + .load_got, + .load_direct, + .load_tlv, + => .{ + .base = .{ .reg = dst_info.?.addr_reg }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(limb_abi_size), + .disp = off, + } }, + }, + .indirect => |reg_off| .{ + .base = .{ .reg = 
reg_off.reg }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(limb_abi_size), .disp = reg_off.off + off, - }, - .load_frame => |frame_addr| .{ - .base = .{ .frame = frame_addr.index }, + } }, + }, + .load_frame => |frame_addr| .{ + .base = .{ .frame = frame_addr.index }, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(limb_abi_size), .disp = frame_addr.off + off, - }, - else => unreachable, + } }, }, - ); + else => unreachable, + }; switch (resolved_src_mcv) { .none, .unreach, @@ -9730,29 +9716,38 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .memory, .indirect, .load_frame => try self.asmRegisterMemory( .{ .i_, .mul }, dst_alias, - Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (resolved_src_mcv) { + switch (resolved_src_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse - return self.asmRegisterRegister( - .{ .i_, .mul }, - dst_alias, - registerAlias( - try self.copyToTmpRegister(dst_ty, resolved_src_mcv), - abi_size, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(abi_size), + .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse + return self.asmRegisterRegister( + .{ .i_, .mul }, + dst_alias, + registerAlias( + try self.copyToTmpRegister(dst_ty, resolved_src_mcv), + abi_size, + ), ), - ), + } }, }, .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, - .disp = reg_off.off, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(abi_size), + .disp = reg_off.off, + } }, }, .load_frame => |frame_addr| .{ .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(abi_size), + .disp = frame_addr.off, + } }, }, else => unreachable, - }), + }, ), } }, @@ -9857,9 +9852,14 @@ fn genVarDbgInfo( // .offset = -off, //} }, .memory => |address| .{ .memory = address }, - .load_symbol => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } }, // TODO + .load_symbol => |sym_off| 
loc: { + assert(sym_off.off == 0); + break :loc .{ .linker_load = .{ .type = .direct, .sym_index = sym_off.sym } }; + }, // TODO .load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } }, - .load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } }, + .load_direct => |sym_index| .{ + .linker_load = .{ .type = .direct, .sym_index = sym_index }, + }, .immediate => |x| .{ .immediate = x }, .undef => .undef, .none => .none, @@ -10123,19 +10123,16 @@ fn genCall(self: *Self, info: union(enum) { try self.genSetReg( callee_reg, Type.usize, - .{ .load_symbol = sym.esym_index }, + .{ .load_symbol = .{ .sym = sym.esym_index } }, ); try self.asmRegister(.{ ._, .call }, callee_reg); - } else { - _ = try self.addInst(.{ - .tag = .call, - .ops = .linker_reloc, - .data = .{ .reloc = .{ - .atom_index = try self.owner.getSymbolIndex(self), - .sym_index = sym.esym_index, - } }, - }); - } + } else try self.asmMemory(.{ ._, .call }, .{ + .base = .{ .reloc = .{ + .atom_index = try self.owner.getSymbolIndex(self), + .sym_index = sym.esym_index, + } }, + .mod = .{ .rm = .{ .size = .qword } }, + }); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = coff_file.getAtom(atom).getSymbolIndex().?; @@ -10149,10 +10146,13 @@ fn genCall(self: *Self, info: union(enum) { } else if (self.bin_file.cast(link.File.Plan9)) |p9| { const atom_index = try p9.seeDecl(func.owner_decl); const atom = p9.getAtom(atom_index); - try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ + try self.asmMemory(.{ ._, .call }, .{ .base = .{ .reg = .ds }, - .disp = @intCast(atom.getOffsetTableAddress(p9)), - })); + .mod = .{ .rm = .{ + .size = .qword, + .disp = @intCast(atom.getOffsetTableAddress(p9)), + } }, + }); } else unreachable; }, .extern_func => |extern_func| { @@ -10637,7 +10637,10 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { try 
self.asmRegisterMemory( .{ ._, .cmp }, registerAlias(dst_reg, op_abi_size), - Memory.sib(Memory.PtrSize.fromSize(op_abi_size), .{ .base = .{ .reg = addr_reg } }), + .{ + .base = .{ .reg = addr_reg }, + .mod = .{ .rm = .{ .size = Memory.Size.fromSize(op_abi_size) } }, + }, ); self.eflags_inst = inst; @@ -10891,10 +10894,13 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, - Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ + .{ .base = .{ .reg = addr_reg }, - .disp = some_info.off, - }), + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(some_abi_size), + .disp = some_info.off, + } }, + }, Immediate.u(0), ); return .{ .eflags = .e }; @@ -10904,17 +10910,23 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, - Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) { + switch (opt_mcv) { .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, - .disp = reg_off.off + some_info.off, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(some_abi_size), + .disp = reg_off.off + some_info.off, + } }, }, .load_frame => |frame_addr| .{ .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off + some_info.off, + .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(some_abi_size), + .disp = frame_addr.off + some_info.off, + } }, }, else => unreachable, - }), + }, Immediate.u(0), ); return .{ .eflags = .e }; @@ -10944,10 +10956,13 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, - Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ + .{ .base = .{ .reg = ptr_reg }, - .disp = some_info.off, - }), + .mod = .{ .rm = .{ + .size = 
Memory.Size.fromSize(some_abi_size), + .disp = some_info.off, + } }, + }, Immediate.u(0), ); @@ -11020,10 +11035,13 @@ fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCV const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, - Memory.sib(self.memPtrSize(Type.anyerror), .{ + .{ .base = .{ .reg = ptr_reg }, - .disp = err_off, - }), + .mod = .{ .rm = .{ + .size = self.memSize(Type.anyerror), + .disp = err_off, + } }, + }, Immediate.u(0), ); @@ -11601,7 +11619,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { label_gop.value_ptr.target = @intCast(self.mir_instructions.len); } else continue; - var mnem_size: ?Memory.PtrSize = null; + var mnem_size: ?Memory.Size = null; const mnem_tag = mnem: { mnem_size = if (mem.endsWith(u8, mnem_str, "b")) .byte @@ -11620,7 +11638,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { mnem_size = null; break :mnem std.meta.stringToEnum(Instruction.Mnemonic, mnem_str); } orelse return self.fail("invalid mnemonic: '{s}'", .{mnem_str}); - if (@as(?Memory.PtrSize, switch (mnem_tag) { + if (@as(?Memory.Size, switch (mnem_tag) { .fldenv, .fnstenv, .fstenv => .none, .ldmxcsr, .stmxcsr, .vldmxcsr, .vstmxcsr => .dword, else => null, @@ -11685,10 +11703,13 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { if (colon) |colon_pos| { const disp = std.fmt.parseInt(i32, op_str[colon_pos + ":".len ..], 0) catch return self.fail("invalid displacement: '{s}'", .{op_str}); - op.* = .{ .mem = Memory.sib( - mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}), - .{ .base = .{ .reg = reg }, .disp = disp }, - ) }; + op.* = .{ .mem = .{ + .base = .{ .reg = reg }, + .mod = .{ .rm = .{ + .size = mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}), + .disp = disp, + } }, + } }; } else { if (mnem_size) |size| if (reg.bitSize() != size.bitSize()) return self.fail("invalid register size: '{s}'", .{op_str}); @@ 
-11712,34 +11733,45 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { .{ .reg = reg } else return self.fail("invalid modifier: '{s}'", .{modifier}), - .memory => |addr| if (mem.eql(u8, modifier, "") or - mem.eql(u8, modifier, "P")) - .{ .mem = Memory.sib( - mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}), - .{ .base = .{ .reg = .ds }, .disp = @intCast(@as(i64, @bitCast(addr))) }, - ) } + .memory => |addr| if (mem.eql(u8, modifier, "") or mem.eql(u8, modifier, "P")) + .{ .mem = .{ + .base = .{ .reg = .ds }, + .mod = .{ .rm = .{ + .size = mnem_size orelse + return self.fail("unknown size: '{s}'", .{op_str}), + .disp = @intCast(@as(i64, @bitCast(addr))), + } }, + } } else return self.fail("invalid modifier: '{s}'", .{modifier}), .indirect => |reg_off| if (mem.eql(u8, modifier, "")) - .{ .mem = Memory.sib( - mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}), - .{ .base = .{ .reg = reg_off.reg }, .disp = reg_off.off }, - ) } + .{ .mem = .{ + .base = .{ .reg = reg_off.reg }, + .mod = .{ .rm = .{ + .size = mnem_size orelse + return self.fail("unknown size: '{s}'", .{op_str}), + .disp = reg_off.off, + } }, + } } else return self.fail("invalid modifier: '{s}'", .{modifier}), .load_frame => |frame_addr| if (mem.eql(u8, modifier, "")) - .{ .mem = Memory.sib( - mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}), - .{ .base = .{ .frame = frame_addr.index }, .disp = frame_addr.off }, - ) } + .{ .mem = .{ + .base = .{ .frame = frame_addr.index }, + .mod = .{ .rm = .{ + .size = mnem_size orelse + return self.fail("unknown size: '{s}'", .{op_str}), + .disp = frame_addr.off, + } }, + } } else return self.fail("invalid modifier: '{s}'", .{modifier}), .lea_got => |sym_index| if (mem.eql(u8, modifier, "P")) .{ .reg = try self.copyToTmpRegister(Type.usize, .{ .lea_got = sym_index }) } else return self.fail("invalid modifier: '{s}'", .{modifier}), - .load_symbol => |sym_index| if (mem.eql(u8, modifier, "P")) - .{ .reg = try 
self.copyToTmpRegister(Type.usize, .{ .load_symbol = sym_index }) } + .load_symbol => |sym_off| if (mem.eql(u8, modifier, "P")) + .{ .reg = try self.copyToTmpRegister(Type.usize, .{ .load_symbol = sym_off }) } else return self.fail("invalid modifier: '{s}'", .{modifier}), else => return self.fail("invalid constraint: '{s}'", .{op_str}), @@ -11774,47 +11806,61 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const scale_str = sib_it.next() orelse ""; if (index_str.len == 0 and scale_str.len > 0) return self.fail("invalid memory operand: '{s}'", .{op_str}); - const scale = if (scale_str.len > 0) switch (std.fmt.parseInt(u4, scale_str, 10) catch - return self.fail("invalid scale: '{s}'", .{op_str})) { - 1, 2, 4, 8 => |scale| scale, - else => return self.fail("invalid scale: '{s}'", .{op_str}), - } else 1; + const scale: Memory.Scale = if (scale_str.len > 0) + switch (std.fmt.parseInt(u4, scale_str, 10) catch + return self.fail("invalid scale: '{s}'", .{op_str})) { + 1 => .@"1", + 2 => .@"2", + 4 => .@"4", + 8 => .@"8", + else => return self.fail("invalid scale: '{s}'", .{op_str}), + } + else + .@"1"; if (sib_it.next()) |_| return self.fail("invalid memory operand: '{s}'", .{op_str}); - op.* = .{ .mem = Memory.sib(mnem_size orelse - return self.fail("unknown size: '{s}'", .{op_str}), .{ - .disp = if (mem.startsWith(u8, op_str[0..open], "%[") and - mem.endsWith(u8, op_str[0..open], "]")) - disp: { - const colon = mem.indexOfScalarPos(u8, op_str[0..open], "%[".len, ':'); - const modifier = if (colon) |colon_pos| - op_str[colon_pos + ":".len .. open - "]".len] + op.* = .{ + .mem = .{ + .base = if (base_str.len > 0) + .{ .reg = parseRegName(base_str["%%".len..]) orelse + return self.fail("invalid base register: '{s}'", .{base_str}) } else - ""; - break :disp switch (args.items[ - arg_map.get(op_str["%[".len .. 
colon orelse open - "]".len]) orelse - return self.fail("no matching constraint: '{s}'", .{op_str}) - ]) { - .immediate => |imm| if (mem.eql(u8, modifier, "") or - mem.eql(u8, modifier, "c")) - math.cast(i32, @as(i64, @bitCast(imm))) orelse + .none, + .mod = .{ .rm = .{ + .size = mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}), + .index = if (index_str.len > 0) + parseRegName(index_str["%%".len..]) orelse + return self.fail("invalid index register: '{s}'", .{op_str}) + else + .none, + .scale = scale, + .disp = if (mem.startsWith(u8, op_str[0..open], "%[") and + mem.endsWith(u8, op_str[0..open], "]")) + disp: { + const colon = mem.indexOfScalarPos(u8, op_str[0..open], "%[".len, ':'); + const modifier = if (colon) |colon_pos| + op_str[colon_pos + ":".len .. open - "]".len] + else + ""; + break :disp switch (args.items[ + arg_map.get(op_str["%[".len .. colon orelse open - "]".len]) orelse + return self.fail("no matching constraint: '{s}'", .{op_str}) + ]) { + .immediate => |imm| if (mem.eql(u8, modifier, "") or + mem.eql(u8, modifier, "c")) + math.cast(i32, @as(i64, @bitCast(imm))) orelse + return self.fail("invalid displacement: '{s}'", .{op_str}) + else + return self.fail("invalid modifier: '{s}'", .{modifier}), + else => return self.fail("invalid constraint: '{s}'", .{op_str}), + }; + } else if (open > 0) + std.fmt.parseInt(i32, op_str[0..open], 0) catch return self.fail("invalid displacement: '{s}'", .{op_str}) else - return self.fail("invalid modifier: '{s}'", .{modifier}), - else => return self.fail("invalid constraint: '{s}'", .{op_str}), - }; - } else if (open > 0) - std.fmt.parseInt(i32, op_str[0..open], 0) catch - return self.fail("invalid displacement: '{s}'", .{op_str}) - else - 0, - .base = if (base_str.len > 0) .{ .reg = parseRegName(base_str["%%".len..]) orelse - return self.fail("invalid base register: '{s}'", .{base_str}) } else .none, - .scale_index = if (index_str.len > 0) .{ - .index = parseRegName(index_str["%%".len..]) orelse 
- return self.fail("invalid index register: '{s}'", .{op_str}), - .scale = scale, - } else null, - }) }; + 0, + } }, + }, + }; } else if (Label.isValid(.reference, op_str)) { const anon = std.ascii.isDigit(op_str[0]); const label_gop = try labels.getOrPut(self.gpa, op_str[0..if (anon) 1 else op_str.len]); @@ -12534,20 +12580,23 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ), .lea_frame => .{ .move = .{ ._, .lea } }, else => unreachable, - }).read(self, registerAlias(dst_reg, abi_size), Memory.sib( - self.memPtrSize(ty), - switch (src_mcv) { - .register_offset, .indirect => |reg_off| .{ - .base = .{ .reg = reg_off.reg }, + }).read(self, registerAlias(dst_reg, abi_size), switch (src_mcv) { + .register_offset, .indirect => |reg_off| .{ + .base = .{ .reg = reg_off.reg }, + .mod = .{ .rm = .{ + .size = self.memSize(ty), .disp = reg_off.off, - }, - .load_frame, .lea_frame => |frame_addr| .{ - .base = .{ .frame = frame_addr.index }, + } }, + }, + .load_frame, .lea_frame => |frame_addr| .{ + .base = .{ .frame = frame_addr.index }, + .mod = .{ .rm = .{ + .size = self.memSize(ty), .disp = frame_addr.off, - }, - else => unreachable, + } }, }, - )), + else => unreachable, + }), .memory, .load_symbol, .load_direct, .load_got, .load_tlv => { switch (src_mcv) { .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| @@ -12555,20 +12604,40 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ty, dst_reg.class(), ty.abiAlignment(mod).check(@as(u32, @bitCast(small_addr))), - )).read(self, registerAlias(dst_reg, abi_size), Memory.sib( - self.memPtrSize(ty), - .{ .base = .{ .reg = .ds }, .disp = small_addr }, - )), + )).read(self, registerAlias(dst_reg, abi_size), .{ + .base = .{ .reg = .ds }, + .mod = .{ .rm = .{ + .size = self.memSize(ty), + .disp = small_addr, + } }, + }), + .load_symbol => |sym_off| switch (dst_reg.class()) { + .general_purpose => { + assert(sym_off.off == 0); + try 
self.asmRegisterMemory(.{ ._, .mov }, registerAlias(dst_reg, abi_size), .{ + .base = .{ .reloc = .{ + .atom_index = try self.owner.getSymbolIndex(self), + .sym_index = sym_off.sym, + } }, + .mod = .{ .rm = .{ + .size = self.memSize(ty), + .disp = sym_off.off, + } }, + }); + return; + }, + .segment, .mmx => unreachable, + .x87, .sse => {}, + }, .load_direct => |sym_index| switch (dst_reg.class()) { .general_purpose => { - const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ .tag = .mov, .ops = .direct_reloc, .data = .{ .rx = .{ - .r1 = dst_reg.to64(), - .payload = try self.addExtra(Mir.Reloc{ - .atom_index = atom_index, + .r1 = registerAlias(dst_reg, abi_size), + .payload = try self.addExtra(bits.Symbol{ + .atom_index = try self.owner.getSymbolIndex(self), .sym_index = sym_index, }), } }, @@ -12578,7 +12647,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .segment, .mmx => unreachable, .x87, .sse => {}, }, - .load_symbol, .load_got, .load_tlv => {}, + .load_got, .load_tlv => {}, else => unreachable, } @@ -12589,22 +12658,42 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr try (try self.moveStrategy(ty, dst_reg.class(), false)).read( self, registerAlias(dst_reg, abi_size), - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = addr_reg } }), + .{ + .base = .{ .reg = addr_reg }, + .mod = .{ .rm = .{ .size = Memory.Size.fromSize(abi_size) } }, + }, ); }, - .lea_symbol, .lea_direct, .lea_got => |sym_index| { + .lea_symbol => |sym_index| { const atom_index = try self.owner.getSymbolIndex(self); if (self.bin_file.cast(link.File.Elf)) |_| { - _ = try self.addInst(.{ - .tag = .lea, - .ops = .linker_reloc, - .data = .{ .rx = .{ - .r1 = dst_reg.to64(), - .payload = try self.addExtra(Mir.Reloc{ + try self.asmRegisterMemory( + .{ ._, .lea }, + dst_reg.to64(), + .{ + .base = .{ .reloc = .{ .atom_index = atom_index, - .sym_index = sym_index, - }), + .sym_index = 
sym_index.sym, + } }, + .mod = .{ .rm = .{ + .size = .qword, + .disp = sym_index.off, + } }, + }, + ); + } else return self.fail("TODO emit symbol sequence on {s}", .{ + @tagName(self.bin_file.tag), + }); + }, + .lea_direct, .lea_got => |sym_index| { + const atom_index = try self.owner.getSymbolIndex(self); + if (self.bin_file.cast(link.File.Elf)) |_| { + try self.asmRegisterMemory(.{ ._, .lea }, dst_reg.to64(), .{ + .base = .{ .reloc = .{ + .atom_index = atom_index, + .sym_index = sym_index, } }, + .mod = .{ .rm = .{ .size = .qword } }, }); } else { _ = try self.addInst(.{ @@ -12620,7 +12709,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }, .data = .{ .rx = .{ .r1 = dst_reg.to64(), - .payload = try self.addExtra(Mir.Reloc{ + .payload = try self.addExtra(bits.Symbol{ .atom_index = atom_index, .sym_index = sym_index, }), @@ -12636,14 +12725,17 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .ops = .tlv_reloc, .data = .{ .rx = .{ .r1 = .rdi, - .payload = try self.addExtra(Mir.Reloc{ + .payload = try self.addExtra(bits.Symbol{ .atom_index = atom_index, .sym_index = sym_index, }), } }, }); // TODO: spill registers before calling - try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .rdi } })); + try self.asmMemory(.{ ._, .call }, .{ + .base = .{ .reg = .rdi }, + .mod = .{ .rm = .{ .size = .qword } }, + }); try self.genSetReg(dst_reg.to64(), Type.usize, .{ .register = .rax }); } else return self.fail("TODO emit ptr to TLV sequence on {s}", .{ @tagName(self.bin_file.tag), @@ -12660,6 +12752,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .none => .{ .immediate = @bitCast(@as(i64, disp)) }, .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } }, + .reloc => |base_symbol| .{ .lea_symbol = .{ .sym = base_symbol.sym_index, .off = 
disp } }, }; switch (src_mcv) { .none, .unreach, .dead, .reserved_frame => unreachable, @@ -12675,7 +12768,10 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal }; try self.asmMemoryImmediate( .{ ._, .mov }, - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), + .{ .base = base, .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(abi_size), + .disp = disp, + } } }, immediate, ); }, @@ -12683,27 +12779,31 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal else => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| { try self.asmMemoryImmediate( .{ ._, .mov }, - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), + .{ .base = base, .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(abi_size), + .disp = disp, + } } }, Immediate.s(small), ); } else { var offset: i32 = 0; while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate( .{ ._, .mov }, - Memory.sib(.dword, .{ .base = base, .disp = disp + offset }), - if (ty.isSignedInt(mod)) - Immediate.s( - @truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)), - ) - else - Immediate.u(@as( - u32, - @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0), - )), + .{ .base = base, .mod = .{ .rm = .{ + .size = .dword, + .disp = disp + offset, + } } }, + if (ty.isSignedInt(mod)) Immediate.s( + @truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)), + ) else Immediate.u( + @as(u32, @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0)), + ), ); }, }, - .eflags => |cc| try self.asmSetccMemory(cc, Memory.sib(.byte, .{ .base = base, .disp = disp })), + .eflags => |cc| try self.asmSetccMemory(cc, .{ .base = base, .mod = .{ + .rm = .{ .size = .byte, .disp = disp }, + } }), .register => |src_reg| try (try self.moveStrategy(ty, src_reg.class(), switch (base) { .none => ty.abiAlignment(mod).check(@as(u32, @bitCast(disp))), .reg => |reg| 
switch (reg) { @@ -12713,9 +12813,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .frame => |frame_index| self.getFrameAddrAlignment( .{ .index = frame_index, .off = disp }, ).compare(.gte, ty.abiAlignment(mod)), + .reloc => false, })).write( self, - Memory.sib(self.memPtrSize(ty), .{ .base = base, .disp = disp }), + .{ .base = base, .mod = .{ .rm = .{ + .size = self.memSize(ty), + .disp = disp, + } } }, registerAlias(src_reg, abi_size), ), .register_pair => |src_regs| for (src_regs, 0..) |src_reg, src_reg_i| { @@ -12732,11 +12836,12 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .frame => |frame_index| self.getFrameAddrAlignment( .{ .index = frame_index, .off = disp }, ).compare(.gte, ty.abiAlignment(mod)), + .reloc => false, }, - )).write(self, Memory.sib( - Memory.PtrSize.fromSize(part_size), - .{ .base = base, .disp = disp + @as(i32, @intCast(src_reg_i * 8)) }, - ), registerAlias(src_reg, part_size)); + )).write(self, .{ .base = base, .mod = .{ .rm = .{ + .size = Memory.Size.fromSize(part_size), + .disp = disp + @as(i32, @intCast(src_reg_i * 8)), + } } }, registerAlias(src_reg, part_size)); }, .register_overflow => |ro| switch (ty.zigTypeTag(mod)) { .Struct => { @@ -12834,7 +12939,7 @@ fn genExternSymbolRef( .ops = .import_reloc, .data = .{ .rx = .{ .r1 = .rax, - .payload = try self.addExtra(Mir.Reloc{ + .payload = try self.addExtra(bits.Symbol{ .atom_index = atom_index, .sym_index = try coff_file.getGlobalSymbol(callee, lib), }), @@ -12871,8 +12976,12 @@ fn genLazySymbolRef( if (self.bin_file.options.pic) { switch (tag) { - .lea, .call => try self.genSetReg(reg, Type.usize, .{ .load_symbol = sym.esym_index }), - .mov => try self.genSetReg(reg, Type.usize, .{ .load_symbol = sym.esym_index }), + .lea, .call => try self.genSetReg(reg, Type.usize, .{ + .load_symbol = .{ .sym = sym.esym_index }, + }), + .mov => try self.genSetReg(reg, Type.usize, .{ + .load_symbol = .{ .sym = sym.esym_index 
}, + }), else => unreachable, } switch (tag) { @@ -12881,23 +12990,18 @@ fn genLazySymbolRef( else => unreachable, } } else { - const reloc = Mir.Reloc{ + const reloc = bits.Symbol{ .atom_index = try self.owner.getSymbolIndex(self), .sym_index = sym.esym_index, }; switch (tag) { - .lea, .mov => _ = try self.addInst(.{ - .tag = .mov, - .ops = .linker_reloc, - .data = .{ .rx = .{ - .r1 = reg.to64(), - .payload = try self.addExtra(reloc), - } }, + .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), .{ + .base = .{ .reloc = reloc }, + .mod = .{ .rm = .{ .size = .qword } }, }), - .call => _ = try self.addInst(.{ - .tag = .call, - .ops = .linker_reloc, - .data = .{ .reloc = reloc }, + .call => try self.asmMemory(.{ ._, .call }, .{ + .base = .{ .reloc = reloc }, + .mod = .{ .rm = .{ .size = .qword } }, }), else => unreachable, } @@ -12908,8 +13012,13 @@ fn genLazySymbolRef( var atom = p9_file.getAtom(atom_index); _ = atom.getOrCreateOffsetTableEntry(p9_file); const got_addr = atom.getOffsetTableAddress(p9_file); - const got_mem = - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(got_addr) }); + const got_mem: Memory = .{ + .base = .{ .reg = .ds }, + .mod = .{ .rm = .{ + .size = .qword, + .disp = @intCast(got_addr), + } }, + }; switch (tag) { .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem), .call => try self.asmMemory(.{ ._, .call }, got_mem), @@ -13255,19 +13364,20 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { defer if (new_lock) |lock| self.register_manager.unlockReg(lock); const ptr_mcv = try self.resolveInst(extra.ptr); - const ptr_size = Memory.PtrSize.fromSize(val_abi_size); - const ptr_mem = switch (ptr_mcv) { - .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), - else => Memory.sib(ptr_size, .{ + const mem_size = Memory.Size.fromSize(val_abi_size); + const ptr_mem: Memory = switch (ptr_mcv) { + .immediate, .register, .register_offset, .lea_frame => 
ptr_mcv.deref().mem(mem_size), + else => .{ .base = .{ .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv) }, - }), + .mod = .{ .rm = .{ .size = mem_size } }, + }, }; - switch (ptr_mem) { - .sib, .rip => {}, - .moffs => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}), + switch (ptr_mem.mod) { + .rm => {}, + .off => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}), } - const ptr_lock = switch (ptr_mem.base()) { - .none, .frame => null, + const ptr_lock = switch (ptr_mem.base) { + .none, .frame, .reloc => null, .reg => |reg| self.register_manager.lockReg(reg), }; defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); @@ -13320,19 +13430,20 @@ fn atomicOp( defer if (val_lock) |lock| self.register_manager.unlockReg(lock); const val_abi_size: u32 = @intCast(val_ty.abiSize(mod)); - const ptr_size = Memory.PtrSize.fromSize(val_abi_size); - const ptr_mem = switch (ptr_mcv) { - .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), - else => Memory.sib(ptr_size, .{ + const mem_size = Memory.Size.fromSize(val_abi_size); + const ptr_mem: Memory = switch (ptr_mcv) { + .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(mem_size), + else => .{ .base = .{ .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv) }, - }), + .mod = .{ .rm = .{ .size = mem_size } }, + }, }; - switch (ptr_mem) { - .sib, .rip => {}, - .moffs => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}), + switch (ptr_mem.mod) { + .rm => {}, + .off => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}), } - const mem_lock = switch (ptr_mem.base()) { - .none, .frame => null, + const mem_lock = switch (ptr_mem.base) { + .none, .frame, .reloc => null, .reg => |reg| self.register_manager.lockReg(reg), }; defer if (mem_lock) |lock| self.register_manager.unlockReg(lock); @@ -13440,7 +13551,7 @@ fn atomicOp( .memory, .indirect, .load_frame => try self.asmCmovccRegisterMemory( cc, 
registerAlias(tmp_reg, cmov_abi_size), - val_mcv.mem(Memory.PtrSize.fromSize(cmov_abi_size)), + val_mcv.mem(Memory.Size.fromSize(cmov_abi_size)), ), else => { const val_reg = try self.copyToTmpRegister(val_ty, val_mcv); @@ -13461,16 +13572,24 @@ fn atomicOp( _ = try self.asmJccReloc(.ne, loop); return if (unused) .unreach else .{ .register = .rax }; } else { - try self.asmRegisterMemory(.{ ._, .mov }, .rax, Memory.sib(.qword, .{ - .base = ptr_mem.sib.base, - .scale_index = ptr_mem.scaleIndex(), - .disp = ptr_mem.sib.disp + 0, - })); - try self.asmRegisterMemory(.{ ._, .mov }, .rdx, Memory.sib(.qword, .{ - .base = ptr_mem.sib.base, - .scale_index = ptr_mem.scaleIndex(), - .disp = ptr_mem.sib.disp + 8, - })); + try self.asmRegisterMemory(.{ ._, .mov }, .rax, .{ + .base = ptr_mem.base, + .mod = .{ .rm = .{ + .size = .qword, + .index = ptr_mem.mod.rm.index, + .scale = ptr_mem.mod.rm.scale, + .disp = ptr_mem.mod.rm.disp + 0, + } }, + }); + try self.asmRegisterMemory(.{ ._, .mov }, .rdx, .{ + .base = ptr_mem.base, + .mod = .{ .rm = .{ + .size = .qword, + .index = ptr_mem.mod.rm.index, + .scale = ptr_mem.mod.rm.scale, + .disp = ptr_mem.mod.rm.disp + 8, + } }, + }); const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len); const val_mem_mcv: MCValue = switch (val_mcv) { .memory, .indirect, .load_frame => val_mcv, @@ -13524,22 +13643,20 @@ fn atomicOp( if (unused) return .unreach; const dst_mcv = try self.allocTempRegOrMem(val_ty, false); - try self.asmMemoryRegister( - .{ ._, .mov }, - Memory.sib(.qword, .{ - .base = .{ .frame = dst_mcv.load_frame.index }, + try self.asmMemoryRegister(.{ ._, .mov }, .{ + .base = .{ .frame = dst_mcv.load_frame.index }, + .mod = .{ .rm = .{ + .size = .qword, .disp = dst_mcv.load_frame.off + 0, - }), - .rax, - ); - try self.asmMemoryRegister( - .{ ._, .mov }, - Memory.sib(.qword, .{ - .base = .{ .frame = dst_mcv.load_frame.index }, + } }, + }, .rax); + try self.asmMemoryRegister(.{ ._, .mov }, .{ + .base = .{ .frame = 
dst_mcv.load_frame.index }, + .mod = .{ .rm = .{ + .size = .qword, .disp = dst_mcv.load_frame.off + 8, - }), - .rdx, - ); + } }, + }, .rdx); return dst_mcv; }, .libcall => return self.fail("TODO implement x86 atomic libcall", .{}), @@ -13845,55 +13962,74 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterMemory( .{ ._, .mov }, start_reg.to32(), - Memory.sib(.dword, .{ + .{ .base = .{ .reg = addr_reg.to64() }, - .scale_index = .{ .scale = 4, .index = err_reg.to64() }, - .disp = 4, - }), + .mod = .{ .rm = .{ + .size = .dword, + .index = err_reg.to64(), + .scale = .@"4", + .disp = 4, + } }, + }, ); try self.asmRegisterMemory( .{ ._, .mov }, end_reg.to32(), - Memory.sib(.dword, .{ + .{ .base = .{ .reg = addr_reg.to64() }, - .scale_index = .{ .scale = 4, .index = err_reg.to64() }, - .disp = 8, - }), + .mod = .{ .rm = .{ + .size = .dword, + .index = err_reg.to64(), + .scale = .@"4", + .disp = 8, + } }, + }, ); try self.asmRegisterRegister(.{ ._, .sub }, end_reg.to32(), start_reg.to32()); try self.asmRegisterMemory( .{ ._, .lea }, start_reg.to64(), - Memory.sib(.byte, .{ + .{ .base = .{ .reg = addr_reg.to64() }, - .scale_index = .{ .scale = 1, .index = start_reg.to64() }, - .disp = 0, - }), + .mod = .{ .rm = .{ + .size = .dword, + .index = start_reg.to64(), + } }, + }, ); try self.asmRegisterMemory( .{ ._, .lea }, end_reg.to32(), - Memory.sib(.byte, .{ + .{ .base = .{ .reg = end_reg.to64() }, - .disp = -1, - }), + .mod = .{ .rm = .{ + .size = .byte, + .disp = -1, + } }, + }, ); const dst_mcv = try self.allocRegOrMem(inst, false); try self.asmMemoryRegister( .{ ._, .mov }, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_mcv.load_frame.index }, - .disp = dst_mcv.load_frame.off, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_mcv.load_frame.off, + } }, + }, start_reg.to64(), ); try self.asmMemoryRegister( .{ ._, .mov }, - Memory.sib(.qword, .{ + .{ .base = .{ .frame = dst_mcv.load_frame.index }, - .disp = dst_mcv.load_frame.off 
+ 8, - }), + .mod = .{ .rm = .{ + .size = .qword, + .disp = dst_mcv.load_frame.off + 8, + } }, + }, end_reg.to64(), ); @@ -14130,7 +14266,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { .Or => { if (operand_mcv.isMemory()) try self.asmMemoryImmediate( .{ ._, .@"test" }, - operand_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + operand_mcv.mem(Memory.Size.fromSize(abi_size)), Immediate.u(mask), ) else { const operand_reg = registerAlias(if (operand_mcv.isRegister()) @@ -14522,7 +14658,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { mir_tag, mop1_reg, mop2_reg, - mops[2].mem(Memory.PtrSize.fromSize(abi_size)), + mops[2].mem(Memory.Size.fromSize(abi_size)), ); break :result mops[0]; }; @@ -14633,24 +14769,32 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void { const mem_reloc = try self.asmJccReloc(.ae, undefined); try self.genSetReg(addr_reg, ptr_anyopaque_ty, reg_save_area); - if (!unused) - try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, Memory.sib(.qword, .{ - .base = .{ .reg = addr_reg }, - .scale_index = .{ .scale = 1, .index = offset_reg.to64() }, - })); - try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, Memory.sib(.qword, .{ + if (!unused) try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, .{ + .base = .{ .reg = addr_reg }, + .mod = .{ .rm = .{ + .size = .qword, + .index = offset_reg.to64(), + } }, + }); + try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, .{ .base = .{ .reg = offset_reg.to64() }, - .disp = 8, - })); + .mod = .{ .rm = .{ + .size = .qword, + .disp = 8, + } }, + }); try self.genCopy(Type.c_uint, gp_offset, .{ .register = offset_reg }); const done_reloc = try self.asmJmpReloc(undefined); try self.performReloc(mem_reloc); try self.genSetReg(addr_reg, ptr_anyopaque_ty, overflow_arg_area); - try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), Memory.sib(.qword, .{ + try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), .{ .base = .{ .reg = addr_reg }, - .disp = 
@intCast(@max(promote_ty.abiSize(mod), 8)), - })); + .mod = .{ .rm = .{ + .size = .qword, + .disp = @intCast(@max(promote_ty.abiSize(mod), 8)), + } }, + }); try self.genCopy( ptr_anyopaque_ty, overflow_arg_area, @@ -14672,24 +14816,32 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void { const mem_reloc = try self.asmJccReloc(.ae, undefined); try self.genSetReg(addr_reg, ptr_anyopaque_ty, reg_save_area); - if (!unused) - try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, Memory.sib(.qword, .{ - .base = .{ .reg = addr_reg }, - .scale_index = .{ .scale = 1, .index = offset_reg.to64() }, - })); - try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, Memory.sib(.qword, .{ + if (!unused) try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, .{ + .base = .{ .reg = addr_reg }, + .mod = .{ .rm = .{ + .size = .qword, + .index = offset_reg.to64(), + } }, + }); + try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, .{ .base = .{ .reg = offset_reg.to64() }, - .disp = 16, - })); + .mod = .{ .rm = .{ + .size = .qword, + .disp = 16, + } }, + }); try self.genCopy(Type.c_uint, fp_offset, .{ .register = offset_reg }); const done_reloc = try self.asmJmpReloc(undefined); try self.performReloc(mem_reloc); try self.genSetReg(addr_reg, ptr_anyopaque_ty, overflow_arg_area); - try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), Memory.sib(.qword, .{ + try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), .{ .base = .{ .reg = addr_reg }, - .disp = @intCast(@max(promote_ty.abiSize(mod), 8)), - })); + .mod = .{ .rm = .{ + .size = .qword, + .disp = @intCast(@max(promote_ty.abiSize(mod), 8)), + } }, + }); try self.genCopy( ptr_anyopaque_ty, overflow_arg_area, @@ -14838,7 +14990,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { .undef => .undef, .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, - .load_symbol => |sym_index| .{ .load_symbol = sym_index }, + .load_symbol => |sym_index| .{ .load_symbol = .{ .sym = 
sym_index } }, .load_direct => |sym_index| .{ .load_direct = sym_index }, .load_got => |sym_index| .{ .lea_got = sym_index }, .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, @@ -15182,11 +15334,11 @@ fn registerAlias(reg: Register, size_bytes: u32) Register { }; } -fn memPtrSize(self: *Self, ty: Type) Memory.PtrSize { +fn memSize(self: *Self, ty: Type) Memory.Size { const mod = self.bin_file.options.module.?; return switch (ty.zigTypeTag(mod)) { - .Float => Memory.PtrSize.fromBitSize(ty.floatBits(self.target.*)), - else => Memory.PtrSize.fromSize(@intCast(ty.abiSize(mod))), + .Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)), + else => Memory.Size.fromSize(@intCast(ty.abiSize(mod))), }; } |
