Diffstat (limited to 'src')
-rw-r--r--  src/arch/wasm/CodeGen.zig    |  69
-rw-r--r--  src/arch/x86_64/CodeGen.zig  | 116
-rw-r--r--  src/codegen.zig              |  25
-rw-r--r--  src/link/Elf.zig             |   1
4 files changed, 142 insertions(+), 69 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index d86e0069cf..7d2046b90b 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -619,7 +619,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
.code = &value_bytes,
.symbol_index = try self.bin_file.createLocalSymbol(self.decl, ty),
};
- const result = decl_gen.genTypedValue(ty, val, value_bytes.writer()) catch |err| {
+ const result = decl_gen.genTypedValue(ty, val) catch |err| {
// When a codegen error occurred, take ownership of the error message
if (err == error.CodegenFail) {
self.err_msg = decl_gen.err_msg;
@@ -907,14 +907,15 @@ pub const DeclGen = struct {
break :init_val payload.data.init;
} else decl.val;
if (init_val.tag() != .unreachable_value) {
- return self.genTypedValue(decl.ty, init_val, self.code.writer());
+ return self.genTypedValue(decl.ty, init_val);
}
return Result{ .appended = {} };
}
}
/// Generates the wasm bytecode for the declaration belonging to `Context`
- fn genTypedValue(self: *DeclGen, ty: Type, val: Value, writer: anytype) InnerError!Result {
+ fn genTypedValue(self: *DeclGen, ty: Type, val: Value) InnerError!Result {
+ const writer = self.code.writer();
if (val.isUndef()) {
try writer.writeByteNTimes(0xaa, @intCast(usize, ty.abiSize(self.target())));
return Result{ .appended = {} };
@@ -926,7 +927,7 @@ pub const DeclGen = struct {
.function => val.castTag(.function).?.data.owner_decl,
else => unreachable,
};
- return try self.lowerDeclRef(ty, val, fn_decl, writer);
+ return try self.lowerDeclRef(ty, val, fn_decl);
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
@@ -942,9 +943,9 @@ pub const DeclGen = struct {
if (ty.isPtrLikeOptional()) {
if (val.castTag(.opt_payload)) |payload| {
- return self.genTypedValue(payload_type, payload.data, writer);
+ return self.genTypedValue(payload_type, payload.data);
} else if (!val.isNull()) {
- return self.genTypedValue(payload_type, val, writer);
+ return self.genTypedValue(payload_type, val);
} else {
try writer.writeByteNTimes(0, abi_size);
return Result{ .appended = {} };
@@ -956,7 +957,6 @@ pub const DeclGen = struct {
switch (try self.genTypedValue(
payload_type,
if (val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef),
- writer,
)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
@@ -972,7 +972,7 @@ pub const DeclGen = struct {
const elem_vals = val.castTag(.array).?.data;
const elem_ty = ty.childType();
for (elem_vals) |elem_val| {
- switch (try self.genTypedValue(elem_ty, elem_val, writer)) {
+ switch (try self.genTypedValue(elem_ty, elem_val)) {
.appended => {},
.externally_managed => |data| try writer.writeAll(data),
}
@@ -987,20 +987,20 @@ pub const DeclGen = struct {
var index: u32 = 0;
while (index < len) : (index += 1) {
- switch (try self.genTypedValue(elem_ty, array, writer)) {
+ switch (try self.genTypedValue(elem_ty, array)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
}
if (sentinel) |sentinel_value| {
- return self.genTypedValue(elem_ty, sentinel_value, writer);
+ return self.genTypedValue(elem_ty, sentinel_value);
}
return Result{ .appended = {} };
},
.empty_array_sentinel => {
const elem_ty = ty.childType();
const sent_val = ty.sentinel().?;
- return self.genTypedValue(elem_ty, sent_val, writer);
+ return self.genTypedValue(elem_ty, sent_val);
},
else => unreachable,
},
@@ -1037,25 +1037,37 @@ pub const DeclGen = struct {
const int_val = val.enumToInt(ty, &int_buffer);
var buf: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&buf);
- return self.genTypedValue(int_ty, int_val, writer);
+ return self.genTypedValue(int_ty, int_val);
},
.Bool => {
try writer.writeByte(@boolToInt(val.toBool()));
return Result{ .appended = {} };
},
.Struct => {
- const struct_ty = ty.castTag(.@"struct").?.data;
- if (struct_ty.layout == .Packed) {
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ if (struct_obj.layout == .Packed) {
return self.fail("TODO: Packed structs for wasm", .{});
}
+
+ const struct_begin = self.code.items.len;
const field_vals = val.castTag(.@"struct").?.data;
for (field_vals) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBits()) continue;
- switch (try self.genTypedValue(field_ty, field_val, writer)) {
+
+ switch (try self.genTypedValue(field_ty, field_val)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
}
+ const unpadded_field_len = self.code.items.len - struct_begin;
+
+ // Pad struct members if required
+ const padded_field_end = ty.structFieldOffset(index + 1, self.target());
+ const padding = try std.math.cast(usize, padded_field_end - unpadded_field_len);
+
+ if (padding > 0) {
+ try writer.writeByteNTimes(0, padding);
+ }
}
return Result{ .appended = {} };
},
@@ -1064,12 +1076,12 @@ pub const DeclGen = struct {
const layout = ty.unionGetLayout(self.target());
if (layout.payload_size == 0) {
- return self.genTypedValue(ty.unionTagType().?, union_val.tag, writer);
+ return self.genTypedValue(ty.unionTagType().?, union_val.tag);
}
// Check if we should store the tag first, in which case, do so now:
if (layout.tag_align >= layout.payload_align) {
- switch (try self.genTypedValue(ty.unionTagType().?, union_val.tag, writer)) {
+ switch (try self.genTypedValue(ty.unionTagType().?, union_val.tag)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
}
@@ -1082,7 +1094,7 @@ pub const DeclGen = struct {
if (!field_ty.hasRuntimeBits()) {
try writer.writeByteNTimes(0xaa, @intCast(usize, layout.payload_size));
} else {
- switch (try self.genTypedValue(field_ty, union_val.val, writer)) {
+ switch (try self.genTypedValue(field_ty, union_val.val)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
}
@@ -1098,26 +1110,26 @@ pub const DeclGen = struct {
if (layout.tag_size == 0) {
return Result{ .appended = {} };
}
- return self.genTypedValue(union_ty.tag_ty, union_val.tag, writer);
+ return self.genTypedValue(union_ty.tag_ty, union_val.tag);
},
.Pointer => switch (val.tag()) {
.variable => {
const decl = val.castTag(.variable).?.data.owner_decl;
- return self.lowerDeclRef(ty, val, decl, writer);
+ return self.lowerDeclRef(ty, val, decl);
},
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
- return self.lowerDeclRef(ty, val, decl, writer);
+ return self.lowerDeclRef(ty, val, decl);
},
.slice => {
const slice = val.castTag(.slice).?.data;
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&buf);
- switch (try self.genTypedValue(ptr_ty, slice.ptr, writer)) {
+ switch (try self.genTypedValue(ptr_ty, slice.ptr)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
- switch (try self.genTypedValue(Type.usize, slice.len, writer)) {
+ switch (try self.genTypedValue(Type.usize, slice.len)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
@@ -1135,14 +1147,14 @@ pub const DeclGen = struct {
const is_pl = val.errorUnionIsPayload();
const err_val = if (!is_pl) val else Value.initTag(.zero);
- switch (try self.genTypedValue(error_ty, err_val, writer)) {
+ switch (try self.genTypedValue(error_ty, err_val)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
if (payload_ty.hasRuntimeBits()) {
const pl_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef);
- switch (try self.genTypedValue(payload_ty, pl_val, writer)) {
+ switch (try self.genTypedValue(payload_ty, pl_val)) {
.externally_managed => |data| try writer.writeAll(data),
.appended => {},
}
@@ -1167,11 +1179,12 @@ pub const DeclGen = struct {
}
}
- fn lowerDeclRef(self: *DeclGen, ty: Type, val: Value, decl: *Module.Decl, writer: anytype) InnerError!Result {
+ fn lowerDeclRef(self: *DeclGen, ty: Type, val: Value, decl: *Module.Decl) InnerError!Result {
+ const writer = self.code.writer();
if (ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ty = ty.slicePtrFieldType(&buf);
- switch (try self.genTypedValue(slice_ty, val, writer)) {
+ switch (try self.genTypedValue(slice_ty, val)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
}
@@ -1179,7 +1192,7 @@ pub const DeclGen = struct {
.base = .{ .tag = .int_u64 },
.data = val.sliceLen(),
};
- return self.genTypedValue(Type.usize, Value.initPayload(&slice_len.base), writer);
+ return self.genTypedValue(Type.usize, Value.initPayload(&slice_len.base));
}
decl.markAlive();
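
The wasm changes above are mostly mechanical: genTypedValue and lowerDeclRef drop their writer parameter and derive the writer from self.code, the only buffer they ever wrote into. A minimal sketch of that shape, using a hypothetical Emitter type rather than the real DeclGen:

const std = @import("std");

// Shape of the refactor only; Emitter is a hypothetical stand-in for DeclGen.
const Emitter = struct {
    code: *std.ArrayList(u8),

    fn emitUndef(self: *Emitter, len: usize) !void {
        // Was: a writer parameter of type anytype supplied by every caller.
        const writer = self.code.writer();
        try writer.writeByteNTimes(0xaa, len);
    }
};

pub fn main() !void {
    var code = std.ArrayList(u8).init(std.heap.page_allocator);
    defer code.deinit();
    var emitter = Emitter{ .code = &code };
    try emitter.emitUndef(4);
    std.debug.print("{d} bytes of 0xaa\n", .{code.items.len});
}
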
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 4d3f899cd5..80b50f228b 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -680,6 +680,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
// zig fmt: on
}
+
+ assert(!self.register_manager.frozenRegsExist());
+
if (std.debug.runtime_safety) {
if (self.air_bookkeeping < old_air_bookkeeping + 1) {
std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[inst] });
@@ -809,7 +812,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
const stack_mcv = try self.allocRegOrMem(inst, false);
log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv });
const reg_mcv = self.getResolvedInstValue(inst);
- assert(reg == reg_mcv.register.to64());
+ assert(reg.to64() == reg_mcv.register.to64());
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst, stack_mcv);
try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
@@ -827,9 +830,9 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
/// Allocates a new register and copies `mcv` into it.
/// `reg_owner` is the instruction that gets associated with the register in the register table.
/// This can have a side effect of spilling instructions to the stack to free up a register.
-fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
+fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, ty: Type, mcv: MCValue) !MCValue {
const reg = try self.register_manager.allocReg(reg_owner, &.{});
- try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
+ try self.genSetReg(ty, reg, mcv);
return MCValue{ .register = reg };
}
@@ -838,11 +841,12 @@ fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCVa
fn copyToNewRegisterWithExceptions(
self: *Self,
reg_owner: Air.Inst.Index,
+ ty: Type,
mcv: MCValue,
exceptions: []const Register,
) !MCValue {
const reg = try self.register_manager.allocReg(reg_owner, exceptions);
- try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
+ try self.genSetReg(ty, reg, mcv);
return MCValue{ .register = reg };
}
@@ -892,13 +896,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (operand_abi_size > 8 or dest_abi_size > 8) {
return self.fail("TODO implement intCast for abi sizes larger than 8", .{});
}
- const reg = switch (operand) {
- .register => |src_reg| try self.register_manager.allocReg(inst, &.{src_reg}),
- else => try self.register_manager.allocReg(inst, &.{}),
- };
- try self.genSetReg(dest_ty, reg, .{ .immediate = 0 });
- try self.genSetReg(dest_ty, reg, operand);
- break :blk .{ .register = registerAlias(reg, @intCast(u32, dest_abi_size)) };
+
+ if (operand.isRegister()) self.register_manager.freezeRegs(&.{operand.register});
+ defer if (operand.isRegister()) self.register_manager.unfreezeRegs(&.{operand.register});
+ break :blk try self.copyToNewRegister(inst, dest_ty, operand);
};
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
@@ -1208,7 +1209,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) {
break :result operand;
}
- break :result try self.copyToNewRegister(inst, operand);
+ break :result try self.copyToNewRegister(inst, self.air.typeOfIndex(inst), operand);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1479,16 +1480,11 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const index_ty = self.air.typeOf(extra.rhs);
const index = try self.resolveInst(extra.rhs);
const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
- const dst_mcv = blk: {
- switch (ptr) {
- .ptr_stack_offset => {
- const reg = try self.register_manager.allocReg(inst, &.{offset_reg});
- try self.genSetReg(ptr_ty, reg, ptr);
- break :blk .{ .register = reg };
- },
- else => return self.fail("TODO implement ptr_elem_ptr when ptr is {}", .{ptr}),
- }
- };
+
+ self.register_manager.freezeRegs(&.{offset_reg});
+ defer self.register_manager.unfreezeRegs(&.{offset_reg});
+
+ const dst_mcv = try self.copyToNewRegister(inst, ptr_ty, ptr);
try self.genBinMathOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
break :result dst_mcv;
};
@@ -1795,22 +1791,62 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
}
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
- return if (self.liveness.isUnused(inst)) .dead else result: {
- const mcv = try self.resolveInst(operand);
- const struct_ty = self.air.typeOf(operand).childType();
- const struct_size = @intCast(i32, struct_ty.abiSize(self.target.*));
- const struct_field_offset = @intCast(i32, struct_ty.structFieldOffset(index, self.target.*));
- const struct_field_ty = struct_ty.structFieldType(index);
- const struct_field_size = @intCast(i32, struct_field_ty.abiSize(self.target.*));
-
+ if (self.liveness.isUnused(inst)) {
+ return MCValue.dead;
+ }
+ const mcv = try self.resolveInst(operand);
+ const ptr_ty = self.air.typeOf(operand);
+ const struct_ty = ptr_ty.childType();
+ const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_ty = struct_ty.structFieldType(index);
+ const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
+
+ const dst_mcv: MCValue = result: {
switch (mcv) {
+ .stack_offset => {
+ const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
+ .immediate = struct_field_offset,
+ });
+ self.register_manager.freezeRegs(&.{offset_reg});
+ defer self.register_manager.unfreezeRegs(&.{offset_reg});
+
+ const dst_mcv = try self.copyToNewRegister(inst, ptr_ty, mcv);
+ try self.genBinMathOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
+ break :result dst_mcv;
+ },
.ptr_stack_offset => |off| {
- const ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size;
+ const offset_to_field = struct_size - struct_field_offset - struct_field_size;
+ const ptr_stack_offset = off + @intCast(i32, offset_to_field);
break :result MCValue{ .ptr_stack_offset = ptr_stack_offset };
},
+ .register => |reg| {
+ const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
+ .immediate = struct_field_offset,
+ });
+ self.register_manager.freezeRegs(&.{offset_reg});
+ defer self.register_manager.unfreezeRegs(&.{offset_reg});
+
+ const can_reuse_operand = self.reuseOperand(inst, operand, 0, mcv);
+ const result_reg = blk: {
+ if (can_reuse_operand) {
+ break :blk reg;
+ } else {
+ self.register_manager.freezeRegs(&.{reg});
+ const result_reg = try self.register_manager.allocReg(inst, &.{});
+ try self.genSetReg(ptr_ty, result_reg, mcv);
+ break :blk result_reg;
+ }
+ };
+ defer if (!can_reuse_operand) self.register_manager.unfreezeRegs(&.{reg});
+
+ try self.genBinMathOpMir(.add, ptr_ty, .{ .register = result_reg }, .{ .register = offset_reg });
+ break :result MCValue{ .register = result_reg };
+ },
else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}),
}
};
+ return dst_mcv;
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
@@ -1859,13 +1895,14 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
// Source operand can be an immediate, 8 bits or 32 bits.
// So, if either one of the operands dies with this instruction, we can use it
// as the result MCValue.
+ const dst_ty = self.air.typeOfIndex(inst);
var dst_mcv: MCValue = undefined;
var src_mcv: MCValue = undefined;
if (self.reuseOperand(inst, op_lhs, 0, lhs)) {
// LHS dies; use it as the destination.
// Both operands cannot be memory.
if (lhs.isMemory() and rhs.isMemory()) {
- dst_mcv = try self.copyToNewRegister(inst, lhs);
+ dst_mcv = try self.copyToNewRegister(inst, dst_ty, lhs);
src_mcv = rhs;
} else {
dst_mcv = lhs;
@@ -1875,7 +1912,7 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
// RHS dies; use it as the destination.
// Both operands cannot be memory.
if (lhs.isMemory() and rhs.isMemory()) {
- dst_mcv = try self.copyToNewRegister(inst, rhs);
+ dst_mcv = try self.copyToNewRegister(inst, dst_ty, rhs);
src_mcv = lhs;
} else {
dst_mcv = rhs;
@@ -1887,18 +1924,18 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
// If the allocated register is the same as the rhs register, don't allocate that one
// and instead spill a subsequent one. Otherwise, this can result in a miscompilation
// in the presence of several binary operations performed in a single block.
- try self.copyToNewRegisterWithExceptions(inst, lhs, &.{rhs.register})
+ try self.copyToNewRegisterWithExceptions(inst, dst_ty, lhs, &.{rhs.register})
else
- try self.copyToNewRegister(inst, lhs);
+ try self.copyToNewRegister(inst, dst_ty, lhs);
src_mcv = rhs;
} else {
dst_mcv = if (lhs.isRegister())
// If the allocated register is the same as the rhs register, don't allocate that one
// and instead spill a subsequent one. Otherwise, this can result in a miscompilation
// in the presence of several binary operations performed in a single block.
- try self.copyToNewRegisterWithExceptions(inst, rhs, &.{lhs.register})
+ try self.copyToNewRegisterWithExceptions(inst, dst_ty, rhs, &.{lhs.register})
else
- try self.copyToNewRegister(inst, rhs);
+ try self.copyToNewRegister(inst, dst_ty, rhs);
src_mcv = lhs;
}
}
@@ -1917,7 +1954,6 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
}
// Now for step 2, we assign an MIR instruction
- const dst_ty = self.air.typeOfIndex(inst);
const air_tags = self.air.instructions.items(.tag);
switch (air_tags[inst]) {
.add, .addwrap, .ptr_add => try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv),
@@ -2417,7 +2453,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
.register => |reg| {
if (Register.allocIndex(reg) == null) {
// Save function return value in a callee saved register
- break :result try self.copyToNewRegister(inst, info.return_value);
+ break :result try self.copyToNewRegister(inst, self.air.typeOfIndex(inst), info.return_value);
}
},
else => {},
@@ -2494,7 +2530,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
// Either one, but not both, can be a memory operand.
// Source operand can be an immediate, 8 bits or 32 bits.
const dst_mcv = if (lhs.isImmediate() or (lhs.isMemory() and rhs.isMemory()))
- try self.copyToNewRegister(inst, lhs)
+ try self.copyToNewRegister(inst, ty, lhs)
else
lhs;
// This instruction supports only signed 32-bit immediates at most.
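
A recurring pattern in the x86_64 changes: when an operand already lives in a register, that register is frozen before a destination register is allocated (directly or through copyToNewRegister, which now takes the destination type explicitly), and a defer unfreezes it; the new assert in genBody verifies that nothing stays frozen once an instruction is done. A toy model of that discipline, assuming a hypothetical four-register manager rather than the real register_manager:

const std = @import("std");

// Toy stand-in for the register manager: a frozen register is skipped by
// allocReg, so it cannot be handed out or spilled while it still holds a live value.
const RegisterManager = struct {
    frozen: [4]bool = .{ false, false, false, false },
    allocated: [4]bool = .{ false, false, false, false },

    fn freezeRegs(self: *RegisterManager, regs: []const u2) void {
        for (regs) |r| self.frozen[r] = true;
    }

    fn unfreezeRegs(self: *RegisterManager, regs: []const u2) void {
        for (regs) |r| self.frozen[r] = false;
    }

    fn frozenRegsExist(self: RegisterManager) bool {
        for (self.frozen) |f| {
            if (f) return true;
        }
        return false;
    }

    fn allocReg(self: *RegisterManager) !u2 {
        for (self.allocated) |used, i| {
            if (!used and !self.frozen[i]) {
                self.allocated[i] = true;
                return @intCast(u2, i);
            }
        }
        return error.OutOfRegisters;
    }
};

pub fn main() !void {
    var mgr = RegisterManager{};
    const src = try mgr.allocReg(); // the operand already lives here

    // Same shape as airIntCast/airPtrElemPtr/structFieldPtr above.
    mgr.freezeRegs(&.{src});
    defer mgr.unfreezeRegs(&.{src});

    const dst = try mgr.allocReg(); // guaranteed not to hand src back out
    std.debug.assert(src != dst);
    std.debug.assert(mgr.frozenRegsExist()); // the defer clears this at scope exit
    std.debug.print("src=r{d} dst=r{d}\n", .{ src, dst });
}
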
diff --git a/src/codegen.zig b/src/codegen.zig
index faafe79c13..bcd36358b1 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -373,11 +373,24 @@ pub fn generateSymbol(
},
.Struct => {
// TODO debug info
- // TODO padding of struct members
+ const struct_obj = typed_value.ty.castTag(.@"struct").?.data;
+ if (struct_obj.layout == .Packed) {
+ return Result{
+ .fail = try ErrorMsg.create(
+ bin_file.allocator,
+ src_loc,
+ "TODO implement generateSymbol for packed struct",
+ .{},
+ ),
+ };
+ }
+
+ const struct_begin = code.items.len;
const field_vals = typed_value.val.castTag(.@"struct").?.data;
for (field_vals) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index);
if (!field_ty.hasRuntimeBits()) continue;
+
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
.val = field_val,
@@ -388,6 +401,16 @@ pub fn generateSymbol(
},
.fail => |em| return Result{ .fail = em },
}
+ const unpadded_field_end = code.items.len - struct_begin;
+
+ // Pad struct members if required
+ const target = bin_file.options.target;
+ const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target);
+ const padding = try math.cast(usize, padded_field_end - unpadded_field_end);
+
+ if (padding > 0) {
+ try code.writer().writeByteNTimes(0, padding);
+ }
}
return Result{ .appended = {} };
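
Both the wasm backend and generateSymbol now pad each struct field out to the offset where the next field must start, instead of emitting field values back to back. A self-contained sketch of that arithmetic, with the offsets of a hypothetical extern struct holding a u8 followed by a u32 hard-coded in place of ty.structFieldOffset(index + 1, target):

const std = @import("std");

// Hard-coded stand-ins for structFieldOffset(index + 1): field a (u8) at offset 0,
// field b (u32) at offset 4, struct ABI size 8 with natural alignment assumed.
const field_sizes = [_]usize{ 1, 4 };
const next_field_offsets = [_]usize{ 4, 8 };

pub fn main() !void {
    var code = std.ArrayList(u8).init(std.heap.page_allocator);
    defer code.deinit();
    const writer = code.writer();

    const struct_begin = code.items.len;
    for (field_sizes) |size, index| {
        // Stand-in for genTypedValue()/generateSymbol() appending the field's bytes.
        try writer.writeByteNTimes(0x11, size);

        // Same computation as the patch: everything written so far is the
        // unpadded end of this field; pad with zeroes up to where the next
        // field (or, for the last field, the end of the struct) should begin.
        const unpadded_field_end = code.items.len - struct_begin;
        const padded_field_end = next_field_offsets[index];
        const padding = padded_field_end - unpadded_field_end;
        if (padding > 0) {
            try writer.writeByteNTimes(0, padding);
        }
    }

    // 1 byte of a, 3 padding bytes, 4 bytes of b: 8 bytes in total.
    std.debug.print("emitted {d} bytes\n", .{code.items.len});
}
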
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 71956cd5b3..d0773c44c8 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -2367,6 +2367,7 @@ fn deinitRelocs(gpa: Allocator, table: *File.DbgInfoTypeRelocsTable) void {
}
fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym {
+ log.debug("updateDeclCode {s}{*}", .{ mem.sliceTo(decl.name, 0), decl });
const required_alignment = decl.ty.abiAlignment(self.base.options.target);
const block_list = self.getDeclBlockList(decl);