author     mlugg <mlugg@mlugg.co.uk>                          2025-09-27 11:18:33 +0100
committer  Jacob Young <jacobly0@users.noreply.github.com>    2025-09-27 18:30:52 -0400
commit     611c38e6daffc18bc044ebb1e20d161b4ef757f2 (patch)
tree       f825f464e0bfcfdaf3d4894a7006af8edfb62dfd /src/codegen/x86_64/CodeGen.zig
parent     77fca1652fce295fe0dd7c97432c23b0c4483724 (diff)
x86_64: fix unencodable `rem` lowerings
The memory operand might use one of the extended GPRs R8 through R15 and hence require a REX prefix, but the presence of a REX prefix makes the high-byte register AH unencodable as the source operand. This latent bug was exposed by this branch, presumably because `select` now happens to place something in an extended GPR instead of a legacy GPR.

In theory this could be fixed at minimal cost by introducing a way to tell `select` that neither the destination memory operand nor the other temporary may use an extended GPR. However, I went for the simple solution, which costs one trivial extra instruction: copy the remainder from AH to AL, and *then* copy AL to the destination.
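For illustration, this is roughly what the signed-byte remainder loop lowers to. The register choices below (rsi and rdx for the source bases, r8 for the destination base, r11 for the loop counter) are assumptions made for the example and displacements are omitted; `select` decides the actual placement. EAX/AH/AL are fixed by the ISA, since the byte-sized `idiv` takes its dividend in AX and leaves the remainder in AH.

    ; Unencodable form (before this patch), with illustrative registers:
    0: movsx eax, byte ptr [rsi + r11]   ; load dividend element, sign-extended
       idiv  byte ptr [rdx + r11]        ; AL = quotient, AH = remainder
       mov   byte ptr [r8 + r11], ah     ; r8/r11 force a REX prefix => AH is not encodable
       add   r11, 1
       jnc   0b

    ; Patched form: one extra register-to-register copy, then store from AL:
    0: movsx eax, byte ptr [rsi + r11]
       idiv  byte ptr [rdx + r11]
       movsx eax, ah                     ; copy the remainder out of AH (no REX needed here)
       mov   byte ptr [r8 + r11], al     ; AL encodes fine alongside the REX prefix
       add   r11, 1
       jnc   0b

The unsigned variants in the later hunks follow the same pattern with `div` and `movzx`.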
Diffstat (limited to 'src/codegen/x86_64/CodeGen.zig')
-rw-r--r--  src/codegen/x86_64/CodeGen.zig | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/src/codegen/x86_64/CodeGen.zig b/src/codegen/x86_64/CodeGen.zig
index 9eb33931b3..ffc09f2cb7 100644
--- a/src/codegen/x86_64/CodeGen.zig
+++ b/src/codegen/x86_64/CodeGen.zig
@@ -37912,7 +37912,8 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_unaligned_size), ._, ._ },
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_unaligned_size), ._, ._ },
.{ ._, .i_, .div, .memia(.src1b, .tmp0, .add_unaligned_size), ._, ._, ._ },
- .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_unaligned_size), .tmp1h, ._, ._ },
+ .{ ._, ._, .movsx, .tmp1d, .tmp1h, ._, ._ },
+ .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_unaligned_size), .tmp1l, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
@@ -37944,7 +37945,8 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_unaligned_size), ._, ._ },
.{ .@"0:", ._, .movsx, .tmp1d, .memia(.src0b, .tmp0, .add_unaligned_size), ._, ._ },
.{ ._, .i_, .div, .memia(.src1b, .tmp0, .add_unaligned_size), ._, ._, ._ },
- .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_unaligned_size), .tmp1h, ._, ._ },
+ .{ ._, ._, .movsx, .tmp1d, .tmp1h, ._, ._ },
+ .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_unaligned_size), .tmp1l, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
@@ -37977,7 +37979,8 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_unaligned_size), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_unaligned_size), ._, ._ },
.{ ._, ._, .div, .memia(.src1b, .tmp0, .add_unaligned_size), ._, ._, ._ },
- .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_unaligned_size), .tmp1h, ._, ._ },
+ .{ ._, ._, .movzx, .tmp1d, .tmp1h, ._, ._ },
+ .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_unaligned_size), .tmp1l, ._, ._ },
.{ ._, ._, .add, .tmp0p, .si(1), ._, ._ },
.{ ._, ._nc, .j, .@"0b", ._, ._, ._ },
} },
@@ -38009,7 +38012,8 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp0p, .sa(.src0, .sub_unaligned_size), ._, ._ },
.{ .@"0:", ._, .movzx, .tmp1d, .memia(.src0b, .tmp0, .add_unaligned_size), ._, ._ },
.{ ._, ._, .div, .memia(.src1b, .tmp0, .add_unaligned_size), ._, ._, ._ },
- .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_unaligned_size), .tmp1h, ._, ._ },
+ .{ ._, ._, .movzx, .tmp1d, .tmp1h, ._, ._ },
+ .{ ._, ._, .mov, .memia(.dst0b, .tmp0, .add_unaligned_size), .tmp1l, ._, ._ },
.{ ._, ._c, .in, .tmp0p, ._, ._, ._ },
.{ ._, ._nz, .j, .@"0b", ._, ._, ._ },
} },
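For context, the four patched tables all back byte-wise remainder loops (signed and unsigned, with the two loop-counter styles). A minimal Zig sketch of the kind of operation that can end up in such a loop follows; the function and test names are made up for illustration, and whether a given program reaches exactly these table rows depends on how `select` places the operands.

    const std = @import("std");

    const V = @Vector(32, i8);

    // Element-wise remainder over a byte vector. x86 has no packed integer
    // divide, so the backend lowers this element by element, plausibly through
    // a byte-wise `rem` loop like the ones patched above (an assumption made
    // for illustration).
    fn remBytes(a: V, b: V) V {
        return @rem(a, b);
    }

    test "remBytes" {
        const a: V = @splat(7);
        const b: V = @splat(3);
        const r = remBytes(a, b);
        try std.testing.expectEqual(@as(i8, 1), r[0]);
    }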