path: root/src/arch/wasm/CodeGen.zig
author    Andrew Kelley <andrew@ziglang.org>    2023-08-24 20:43:43 -0700
committer Andrew Kelley <andrew@ziglang.org>    2023-09-21 14:48:40 -0700
commit    accd5701c251c2741479fe08e56c8271c444f021 (patch)
tree      78871f150609687a9210063e90f8f4eb53997c38 /src/arch/wasm/CodeGen.zig
parent    0345d7866347c9066b0646f9e46be9a068dcfaa3 (diff)
compiler: move struct types into InternPool proper
Structs were previously using `SegmentedList` to be given indexes, but were not actually backed by the InternPool arrays. After this, the only remaining uses of `SegmentedList` in the compiler are `Module.Decl` and `Module.Namespace`. Once those last two are migrated to be backed by InternPool arrays as well, we can introduce state serialization by writing these arrays to disk all at once.

Unfortunately, a lot of source code locations touch the struct type API, so this commit is still a work in progress. Once I get it compiling and passing the test suite, I can provide some interesting data points, such as how it affected the InternPool memory size, along with a performance comparison against the master branch.

I also couldn't resist migrating a bunch of the alignment API over to the log2 `Alignment` type, rather than a mishmash of u32 and u64 byte units with 0 implicitly meaning something different and special at every location. It turns out you can do all the math you need directly on the log2 representation of alignments.
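As a minimal sketch of that last point (an illustration with assumed declarations, not the compiler's actual InternPool.Alignment implementation), storing an alignment as its log2 lets rounding, maximum, and byte-unit conversion all operate on the exponent directly:

const std = @import("std");

// Hypothetical stand-in for InternPool.Alignment: the enum's integer
// value is log2(alignment in bytes), so .@"16" has the tag value 4.
const Alignment = enum(u6) {
    @"1" = 0,
    @"2" = 1,
    @"4" = 2,
    @"8" = 3,
    @"16" = 4,
    _,

    // Round `addr` up to this alignment, directly on the log2 form.
    fn forward(a: Alignment, addr: u64) u64 {
        const mask = (@as(u64, 1) << @intFromEnum(a)) - 1;
        return (addr + mask) & ~mask;
    }

    // The stricter of two alignments is simply the larger exponent.
    fn max(a: Alignment, b: Alignment) Alignment {
        return @enumFromInt(@max(@intFromEnum(a), @intFromEnum(b)));
    }

    // Convert to byte units only where an encoding (e.g. a Wasm memarg)
    // actually requires them.
    fn toByteUnits(a: Alignment) u64 {
        return @as(u64, 1) << @intFromEnum(a);
    }
};

test "alignment math on the log2 representation" {
    const a: Alignment = .@"16";
    try std.testing.expectEqual(@as(u64, 32), a.forward(17));
    try std.testing.expectEqual(@as(u64, 16), a.toByteUnits());
    try std.testing.expectEqual(Alignment.@"16", Alignment.max(.@"4", .@"16"));
}

Note how max reduces to an integer @max on the enum tags, which is why calls like func.stack_alignment.max(abi_align) in the diff below are so cheap.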
Diffstat (limited to 'src/arch/wasm/CodeGen.zig')
-rw-r--r--  src/arch/wasm/CodeGen.zig  163
1 file changed, 88 insertions(+), 75 deletions(-)
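Much of the diff below routes packed structs through their backing integer (packed_struct.backingIntType(ip).toType()), for instance when choosing a Wasm valtype. As a user-level illustration of why that is sound (the Flags type is a hypothetical example, not from this commit), a packed struct is bit-for-bit identical to its backing integer:

const std = @import("std");

// Hypothetical packed struct; its backing integer is u8.
const Flags = packed struct(u8) {
    a: bool, // bit 0
    b: bool, // bit 1
    n: u6, // bits 2..7
};

test "a packed struct is bit-for-bit its backing integer" {
    const f: Flags = .{ .a = true, .b = false, .n = 5 };
    const as_int: u8 = @bitCast(f); // reinterpretation, no conversion code
    try std.testing.expectEqual(@as(u8, 0b010101), as_int);
    const back: Flags = @bitCast(as_int); // and it round-trips
    try std.testing.expectEqual(f, back);
}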
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 04ac8c4131..76a27ec718 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -25,6 +25,7 @@ const target_util = @import("../../target.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const abi = @import("abi.zig");
+const Alignment = InternPool.Alignment;
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
const errUnionErrorOffset = codegen.errUnionErrorOffset;
@@ -709,7 +710,7 @@ stack_size: u32 = 0,
/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
/// and also what the llvm backend will emit.
/// However, local variables or the usage of `@setAlignStack` can overwrite this default.
-stack_alignment: u32 = 16,
+stack_alignment: Alignment = .@"16",
// For each individual Wasm valtype we store a separate free list which
// allows us to re-use locals that are no longer used, e.g. a temporary local.
@@ -991,6 +992,7 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
/// Using a given `Type`, returns the corresponding type
fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
const target = mod.getTarget();
+ const ip = &mod.intern_pool;
return switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(target)) {
16 => wasm.Valtype.i32, // stored/loaded as u16
@@ -1005,12 +1007,12 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
break :blk wasm.Valtype.i32; // represented as pointer to stack
},
- .Struct => switch (ty.containerLayout(mod)) {
- .Packed => {
- const struct_obj = mod.typeToStruct(ty).?;
- return typeToValtype(struct_obj.backing_int_ty, mod);
- },
- else => wasm.Valtype.i32,
+ .Struct => {
+ if (mod.typeToPackedStruct(ty)) |packed_struct| {
+ return typeToValtype(packed_struct.backingIntType(ip).toType(), mod);
+ } else {
+ return wasm.Valtype.i32;
+ }
},
.Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.direct => wasm.Valtype.v128,
@@ -1285,12 +1287,12 @@ fn genFunc(func: *CodeGen) InnerError!void {
// store stack pointer so we can restore it when we return from the function
try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
// get the total stack size
- const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment);
- try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(aligned_stack)) } });
- // substract it from the current stack pointer
+ const aligned_stack = func.stack_alignment.forward(func.stack_size);
+ try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(aligned_stack) } });
+ // subtract it from the current stack pointer
try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
// Get negative stack alignment
- try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment)) * -1 } });
+ try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnitsOptional().?)) * -1 } });
// Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
// store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
@@ -1438,7 +1440,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = value.offset(),
- .alignment = scalar_type.abiAlignment(mod),
+ .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
});
}
},
@@ -1527,11 +1529,9 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
};
const abi_align = ty.abiAlignment(mod);
- if (abi_align > func.stack_alignment) {
- func.stack_alignment = abi_align;
- }
+ func.stack_alignment = func.stack_alignment.max(abi_align);
- const offset = std.mem.alignForward(u32, func.stack_size, abi_align);
+ const offset: u32 = @intCast(abi_align.forward(func.stack_size));
defer func.stack_size = offset + abi_size;
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
@@ -1560,11 +1560,9 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
pointee_ty.fmt(mod), pointee_ty.abiSize(mod),
});
};
- if (abi_alignment > func.stack_alignment) {
- func.stack_alignment = abi_alignment;
- }
+ func.stack_alignment = func.stack_alignment.max(abi_alignment);
- const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment);
+ const offset: u32 = @intCast(abi_alignment.forward(func.stack_size));
defer func.stack_size = offset + abi_size;
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
@@ -1749,10 +1747,8 @@ fn isByRef(ty: Type, mod: *Module) bool {
return ty.hasRuntimeBitsIgnoreComptime(mod);
},
.Struct => {
- if (mod.typeToStruct(ty)) |struct_obj| {
- if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
- return isByRef(struct_obj.backing_int_ty, mod);
- }
+ if (mod.typeToPackedStruct(ty)) |packed_struct| {
+ return isByRef(packed_struct.backingIntType(ip).toType(), mod);
}
return ty.hasRuntimeBitsIgnoreComptime(mod);
},
@@ -2120,7 +2116,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
- .alignment = scalar_type.abiAlignment(mod),
+ .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
});
},
else => try func.emitWValue(operand),
@@ -2385,19 +2381,19 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
},
.Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.unrolled => {
- const len = @as(u32, @intCast(abi_size));
+ const len: u32 = @intCast(abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.direct => {
try func.emitWValue(lhs);
try func.lowerToStack(rhs);
// TODO: Add helper functions for simd opcodes
- const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
+ const extra_index: u32 = @intCast(func.mir_extra.items.len);
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
offset + lhs.offset(),
- ty.abiAlignment(mod),
+ @intCast(ty.abiAlignment(mod).toByteUnits(0)),
});
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
@@ -2451,7 +2447,10 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
// store rhs value at stack pointer's location in memory
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) },
+ .{
+ .offset = offset + lhs.offset(),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ },
);
}
@@ -2510,7 +2509,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
offset + operand.offset(),
- ty.abiAlignment(mod),
+ @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return WValue{ .stack = {} };
@@ -2526,7 +2525,10 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) },
+ .{
+ .offset = offset + operand.offset(),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+ },
);
return WValue{ .stack = {} };
@@ -3023,10 +3025,10 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
else => blk: {
const layout: Module.UnionLayout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) break :blk 0;
- if (layout.payload_align > layout.tag_align) break :blk 0;
+ if (layout.payload_align.compare(.gt, layout.tag_align)) break :blk 0;
// tag is stored first so calculate offset from where payload starts
- break :blk @as(u32, @intCast(std.mem.alignForward(u64, layout.tag_size, layout.tag_align)));
+ break :blk layout.tag_align.forward(layout.tag_size);
},
},
.Pointer => switch (parent_ty.ptrSize(mod)) {
@@ -3103,8 +3105,12 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
return @as(WantedT, @intCast(result));
}
+/// This function is intended to assert that `isByRef` returns `false` for `ty`.
+/// However such an assertion fails on the behavior tests currently.
fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
+ // TODO: enable this assertion
+ //assert(!isByRef(ty, mod));
const ip = &mod.intern_pool;
var val = arg_val;
switch (ip.indexToKey(val.ip_index)) {
@@ -3235,16 +3241,18 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
val.writeToMemory(ty, mod, &buf) catch unreachable;
return func.storeSimdImmd(buf);
},
- .struct_type, .anon_struct_type => {
- const struct_obj = mod.typeToStruct(ty).?;
- assert(struct_obj.layout == .Packed);
+ .struct_type => |struct_type| {
+ // non-packed structs are not handled in this function because they
+ // are by-ref types.
+ assert(struct_type.layout == .Packed);
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
- val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
+ val.writeToPackedMemory(ty, mod, &buf, 0) catch unreachable;
+ const backing_int_ty = struct_type.backingIntType(ip).toType();
const int_val = try mod.intValue(
- struct_obj.backing_int_ty,
- std.mem.readIntLittle(u64, &buf),
+ backing_int_ty,
+ mem.readIntLittle(u64, &buf),
);
- return func.lowerConstant(int_val, struct_obj.backing_int_ty);
+ return func.lowerConstant(int_val, backing_int_ty);
},
else => unreachable,
},
@@ -3269,6 +3277,7 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
+ const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa },
.Int, .Enum => switch (ty.intInfo(mod).bits) {
@@ -3298,9 +3307,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
return WValue{ .imm32 = 0xaaaaaaaa };
},
.Struct => {
- const struct_obj = mod.typeToStruct(ty).?;
- assert(struct_obj.layout == .Packed);
- return func.emitUndefined(struct_obj.backing_int_ty);
+ const packed_struct = mod.typeToPackedStruct(ty).?;
+ return func.emitUndefined(packed_struct.backingIntType(ip).toType());
},
else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
}
@@ -3340,7 +3348,7 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
.i64 => |x| @as(i32, @intCast(x)),
.u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
.big_int => unreachable,
- .lazy_align => |ty| @as(i32, @bitCast(ty.toType().abiAlignment(mod))),
+ .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiAlignment(mod).toByteUnits(0))))),
.lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiSize(mod))))),
};
}
@@ -3757,6 +3765,7 @@ fn structFieldPtr(
fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
+ const ip = &mod.intern_pool;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -3769,9 +3778,9 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = switch (struct_ty.containerLayout(mod)) {
.Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => result: {
- const struct_obj = mod.typeToStruct(struct_ty).?;
- const offset = struct_obj.packedFieldBitOffset(mod, field_index);
- const backing_ty = struct_obj.backing_int_ty;
+ const packed_struct = mod.typeToPackedStruct(struct_ty).?;
+ const offset = mod.structPackedFieldBitOffset(packed_struct, field_index);
+ const backing_ty = packed_struct.backingIntType(ip).toType();
const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
};
@@ -3793,7 +3802,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
- } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) {
+ } else if (field_ty.isPtrAtRuntime(mod) and packed_struct.field_types.len == 1) {
// In this case we do not have to perform any transformations,
// we can simply reuse the operand.
break :result func.reuseOperand(struct_field.struct_operand, operand);
@@ -4053,7 +4062,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.addMemArg(.i32_load16_u, .{
.offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
- .alignment = Type.anyerror.abiAlignment(mod),
+ .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
});
}
@@ -4141,7 +4150,10 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
try func.emitWValue(err_union);
try func.addImm32(0);
const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
- try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
+ try func.addMemArg(.i32_store16, .{
+ .offset = err_union.offset() + err_val_offset,
+ .alignment = 2,
+ });
break :result err_union;
};
func.finishAir(inst, result, &.{ty_op.operand});
@@ -4977,7 +4989,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
operand.offset(),
- elem_ty.abiAlignment(mod),
+ @intCast(elem_ty.abiAlignment(mod).toByteUnitsOptional().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
@@ -5065,7 +5077,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
std.wasm.simdOpcode(.i8x16_shuffle),
} ++ [1]u32{undefined} ** 4;
- var lanes = std.mem.asBytes(operands[1..]);
+ var lanes = mem.asBytes(operands[1..]);
for (0..@as(usize, @intCast(mask_len))) |index| {
const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
const base_index = if (mask_elem >= 0)
@@ -5099,6 +5111,7 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
+ const ip = &mod.intern_pool;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const result_ty = func.typeOfIndex(inst);
const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
@@ -5150,13 +5163,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (isByRef(result_ty, mod)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
- const struct_obj = mod.typeToStruct(result_ty).?;
- const fields = struct_obj.fields.values();
- const backing_type = struct_obj.backing_int_ty;
+ const packed_struct = mod.typeToPackedStruct(result_ty).?;
+ const field_types = packed_struct.field_types;
+ const backing_type = packed_struct.backingIntType(ip).toType();
// ensure the result is zero'd
const result = try func.allocLocal(backing_type);
- if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
+ if (backing_type.bitSize(mod) <= 32)
try func.addImm32(0)
else
try func.addImm64(0);
@@ -5164,22 +5177,22 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var current_bit: u16 = 0;
for (elements, 0..) |elem, elem_index| {
- const field = fields[elem_index];
- if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ const field_ty = field_types.get(ip)[elem_index].toType();
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
+ const shift_val = if (backing_type.bitSize(mod) <= 32)
WValue{ .imm32 = current_bit }
else
WValue{ .imm64 = current_bit };
const value = try func.resolveInst(elem);
- const value_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
+ const value_bit_size: u16 = @intCast(field_ty.bitSize(mod));
const int_ty = try mod.intType(.unsigned, value_bit_size);
// load our current result on stack so we can perform all transformations
// using only stack values. Saving the cost of loads and stores.
try func.emitWValue(result);
- const bitcasted = try func.bitcast(int_ty, field.ty, value);
+ const bitcasted = try func.bitcast(int_ty, field_ty, value);
const extended_val = try func.intcast(bitcasted, int_ty, backing_type);
// no need to shift any values when the current offset is 0
const shifted = if (current_bit != 0) shifted: {
@@ -5199,7 +5212,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_index, mod);
- const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
+ const elem_size: u32 = @intCast(elem_ty.abiSize(mod));
const value = try func.resolveInst(elem);
try func.store(offset, value, elem_ty, 0);
@@ -5256,7 +5269,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (isByRef(union_ty, mod)) {
const result_ptr = try func.allocStack(union_ty);
const payload = try func.resolveInst(extra.init);
- if (layout.tag_align >= layout.payload_align) {
+ if (layout.tag_align.compare(.gte, layout.payload_align)) {
if (isByRef(field_ty, mod)) {
const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
try func.store(payload_ptr, payload, field_ty, 0);
@@ -5420,9 +5433,9 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// when the tag alignment is smaller than the payload, the field will be stored
// after the payload.
- const offset = if (layout.tag_align < layout.payload_align) blk: {
- break :blk @as(u32, @intCast(layout.payload_size));
- } else @as(u32, 0);
+ const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
+ break :blk @intCast(layout.payload_size);
+ } else 0;
try func.store(union_ptr, new_tag, tag_ty, offset);
func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
@@ -5439,9 +5452,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
// when the tag alignment is smaller than the payload, the field will be stored
// after the payload.
- const offset = if (layout.tag_align < layout.payload_align) blk: {
- break :blk @as(u32, @intCast(layout.payload_size));
- } else @as(u32, 0);
+ const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
+ break :blk @intCast(layout.payload_size);
+ } else 0;
const tag = try func.load(operand, tag_ty, offset);
const result = try tag.toLocal(func, tag_ty);
func.finishAir(inst, result, &.{ty_op.operand});
@@ -6366,7 +6379,7 @@ fn lowerTry(
const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
- .alignment = Type.anyerror.abiAlignment(mod),
+ .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
});
}
try func.addTag(.i32_eqz);
@@ -7287,7 +7300,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
- .alignment = ty.abiAlignment(mod),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
try func.addLabel(.local_tee, val_local.local.value);
_ = try func.cmp(.stack, expected_val, ty, .eq);
@@ -7349,7 +7362,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(mod),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
} else {
_ = try func.load(ptr, ty, 0);
@@ -7410,7 +7423,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(mod),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
},
);
const select_res = try func.allocLocal(ty);
@@ -7470,7 +7483,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(mod),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
const result = try WValue.toLocal(.stack, func, ty);
return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7566,7 +7579,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.lowerToStack(operand);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(mod),
+ .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
} else {
try func.store(ptr, operand, ty, 0);