Diffstat (limited to 'src/Sema')
-rw-r--r--  src/Sema/bitcast.zig              92
-rw-r--r--  src/Sema/comptime_ptr_access.zig  57
2 files changed, 77 insertions(+), 72 deletions(-)
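In short, the change below is mechanical: layout queries on types whose layout is already resolved (`bitSize`, `abiSize`, `structFieldOffset`) now take the `*Zcu` (`zcu`) directly instead of a `Zcu.PerThread` (`pt`), and the `sema.typeAbiSize` / `sema.typeRequiresComptime` helpers give way to the `Type` methods `abiSizeSema` / `comptimeOnlySema`, which still take `pt`, presumably because they may resolve the layout during semantic analysis.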
diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig
index 065de877e2..73aa53e5e6 100644
--- a/src/Sema/bitcast.zig
+++ b/src/Sema/bitcast.zig
@@ -85,23 +85,23 @@ fn bitCastInner(
     assert(val_ty.hasWellDefinedLayout(zcu));
 
     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
+        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
     else
-        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
+        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
 
     const skip_bits = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - dest_ty.bitSize(pt),
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
     };
 
     var unpack: UnpackValueBits = .{
         .pt = sema.pt,
         .arena = sema.arena,
         .skip_bits = skip_bits,
-        .remaining_bits = dest_ty.bitSize(pt),
+        .remaining_bits = dest_ty.bitSize(zcu),
         .unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
     };
     switch (endian) {
@@ -141,22 +141,22 @@ fn bitCastSpliceInner(
     try val_ty.resolveLayout(pt);
     try splice_val_ty.resolveLayout(pt);
 
-    const splice_bits = splice_val_ty.bitSize(pt);
+    const splice_bits = splice_val_ty.bitSize(zcu);
 
     const splice_offset = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(pt) * 8 - byte_offset * 8 - splice_bits,
+            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
     };
 
-    assert(splice_offset + splice_bits <= val_ty.abiSize(pt) * 8);
+    assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);
 
     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
+        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
     else
-        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
+        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
 
     var unpack: UnpackValueBits = .{
         .pt = pt,
@@ -181,7 +181,7 @@ fn bitCastSpliceInner(
     try unpack.add(splice_val);
 
     unpack.skip_bits = splice_offset + splice_bits;
-    unpack.remaining_bits = val_ty.abiSize(pt) * 8 - splice_offset - splice_bits;
+    unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
     switch (endian) {
         .little => {
             try unpack.add(val);
@@ -229,7 +229,7 @@ const UnpackValueBits = struct {
         }
 
         const ty = val.typeOf(zcu);
-        const bit_size = ty.bitSize(pt);
+        const bit_size = ty.bitSize(zcu);
 
         if (unpack.skip_bits >= bit_size) {
             unpack.skip_bits -= bit_size;
@@ -291,7 +291,7 @@ const UnpackValueBits = struct {
                 // The final element does not have trailing padding.
                 // Elements are reversed in packed memory on BE targets.
                 const elem_ty = ty.childType(zcu);
-                const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
+                const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
                 const len = ty.arrayLen(zcu);
                 const maybe_sent = ty.sentinel(zcu);
@@ -323,12 +323,12 @@ const UnpackValueBits = struct {
                     var cur_bit_off: u64 = 0;
                     var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
                     while (it.next()) |field_idx| {
-                        const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
+                        const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
                         const pad_bits = want_bit_off - cur_bit_off;
                         const field_val = try val.fieldValue(pt, field_idx);
                         try unpack.padding(pad_bits);
                         try unpack.add(field_val);
-                        cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(pt);
+                        cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
                     }
                     // Add trailing padding bits.
                     try unpack.padding(bit_size - cur_bit_off);
@@ -339,11 +339,11 @@ const UnpackValueBits = struct {
                     while (it.next()) |field_idx| {
                         const field_val = try val.fieldValue(pt, field_idx);
                         const field_ty = field_val.typeOf(zcu);
-                        const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
+                        const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
                         const pad_bits = cur_bit_off - want_bit_off;
                         try unpack.padding(pad_bits);
                         try unpack.add(field_val);
-                        cur_bit_off = want_bit_off - field_ty.bitSize(pt);
+                        cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
                     }
                     assert(cur_bit_off == 0);
                 },
@@ -366,7 +366,7 @@ const UnpackValueBits = struct {
                 // This correctly handles the case where `tag == .none`, since the payload is then
                 // either an integer or a byte array, both of which we can unpack.
                 const payload_val = Value.fromInterned(un.val);
-                const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(pt);
+                const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
                 if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
                     try unpack.add(payload_val);
                     try unpack.padding(pad_bits);
@@ -398,13 +398,14 @@ const UnpackValueBits = struct {
     fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
         const pt = unpack.pt;
+        const zcu = pt.zcu;
 
         if (unpack.remaining_bits == 0) {
             return;
         }
 
         const ty = val.typeOf(pt.zcu);
-        const bit_size = ty.bitSize(pt);
+        const bit_size = ty.bitSize(zcu);
 
         // Note that this skips all zero-bit types.
         if (unpack.skip_bits >= bit_size) {
@@ -429,9 +430,10 @@ const UnpackValueBits = struct {
     fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
         const pt = unpack.pt;
+        const zcu = pt.zcu;
 
         const ty = val.typeOf(pt.zcu);
-        const val_bits = ty.bitSize(pt);
+        const val_bits = ty.bitSize(zcu);
 
         assert(bit_offset + bit_count <= val_bits);
 
         switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
@@ -499,12 +501,12 @@ const PackValueBits = struct {
                 const len = ty.arrayLen(zcu);
                 const elem_ty = ty.childType(zcu);
                 const maybe_sent = ty.sentinel(zcu);
-                const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
+                const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
                 const elems = try arena.alloc(InternPool.Index, @intCast(len));
 
                 if (endian == .big and maybe_sent != null) {
                     // TODO: validate sentinel was preserved!
-                    try pack.padding(elem_ty.bitSize(pt));
+                    try pack.padding(elem_ty.bitSize(zcu));
                     if (len != 0) try pack.padding(pad_bits);
                 }
@@ -520,7 +522,7 @@ const PackValueBits = struct {
                 if (endian == .little and maybe_sent != null) {
                     // TODO: validate sentinel was preserved!
                     if (len != 0) try pack.padding(pad_bits);
-                    try pack.padding(elem_ty.bitSize(pt));
+                    try pack.padding(elem_ty.bitSize(zcu));
                 }
 
                 return Value.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -538,23 +540,23 @@ const PackValueBits = struct {
                     var cur_bit_off: u64 = 0;
                     var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
                     while (it.next()) |field_idx| {
-                        const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
+                        const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
                         try pack.padding(want_bit_off - cur_bit_off);
                         const field_ty = ty.structFieldType(field_idx, zcu);
                         elems[field_idx] = (try pack.get(field_ty)).toIntern();
-                        cur_bit_off = want_bit_off + field_ty.bitSize(pt);
+                        cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
                     }
-                    try pack.padding(ty.bitSize(pt) - cur_bit_off);
+                    try pack.padding(ty.bitSize(zcu) - cur_bit_off);
                 },
                 .big => {
-                    var cur_bit_off: u64 = ty.bitSize(pt);
+                    var cur_bit_off: u64 = ty.bitSize(zcu);
                     var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
                     while (it.next()) |field_idx| {
                         const field_ty = ty.structFieldType(field_idx, zcu);
-                        const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
+                        const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
                         try pack.padding(cur_bit_off - want_bit_off);
                         elems[field_idx] = (try pack.get(field_ty)).toIntern();
-                        cur_bit_off = want_bit_off - field_ty.bitSize(pt);
+                        cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
                     }
                     assert(cur_bit_off == 0);
                 },
@@ -622,16 +624,16 @@ const PackValueBits = struct {
         for (field_order, 0..) |*f, i| f.* = @intCast(i);
         // Sort `field_order` to put the fields with the largest bit sizes first.
         const SizeSortCtx = struct {
-            pt: Zcu.PerThread,
+            zcu: *Zcu,
             field_types: []const InternPool.Index,
             fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
                 const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
                 const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
-                return a_ty.bitSize(ctx.pt) > b_ty.bitSize(ctx.pt);
+                return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
             }
         };
         std.mem.sortUnstable(u32, field_order, SizeSortCtx{
-            .pt = pt,
+            .zcu = zcu,
             .field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
         }, SizeSortCtx.lessThan);
@@ -639,7 +641,7 @@ const PackValueBits = struct {
 
         for (field_order) |field_idx| {
             const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
-            const pad_bits = ty.bitSize(pt) - field_ty.bitSize(pt);
+            const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
             if (!padding_after) try pack.padding(pad_bits);
             const field_val = pack.get(field_ty) catch |err| switch (err) {
                 error.ReinterpretDeclRef => {
@@ -682,10 +684,11 @@ const PackValueBits = struct {
     fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
         const pt = pack.pt;
-        const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(pt));
+        const zcu = pt.zcu;
+        const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
 
         for (vals) |val| {
-            if (!Value.fromInterned(val).isUndef(pt.zcu)) break;
+            if (!Value.fromInterned(val).isUndef(zcu)) break;
         } else {
             // All bits of the value are `undefined`.
             return pt.undefValue(want_ty);
@@ -706,8 +709,8 @@ const PackValueBits = struct {
         ptr_cast: {
             if (vals.len != 1) break :ptr_cast;
             const val = Value.fromInterned(vals[0]);
-            if (!val.typeOf(pt.zcu).isPtrAtRuntime(pt.zcu)) break :ptr_cast;
-            if (!want_ty.isPtrAtRuntime(pt.zcu)) break :ptr_cast;
+            if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
+            if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
             return pt.getCoerced(val, want_ty);
         }
@@ -717,7 +720,7 @@ const PackValueBits = struct {
         for (vals) |ip_val| {
             const val = Value.fromInterned(ip_val);
             const ty = val.typeOf(pt.zcu);
-            buf_bits += ty.bitSize(pt);
+            buf_bits += ty.bitSize(zcu);
         }
 
         const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
@@ -726,11 +729,11 @@ const PackValueBits = struct {
         var cur_bit_off: usize = 0;
         for (vals) |ip_val| {
             const val = Value.fromInterned(ip_val);
-            const ty = val.typeOf(pt.zcu);
-            if (!val.isUndef(pt.zcu)) {
+            const ty = val.typeOf(zcu);
+            if (!val.isUndef(zcu)) {
                 try val.writeToPackedMemory(ty, pt, buf, cur_bit_off);
             }
-            cur_bit_off += @intCast(ty.bitSize(pt));
+            cur_bit_off += @intCast(ty.bitSize(zcu));
         }
 
         return Value.readFromPackedMemory(want_ty, pt, buf, @intCast(bit_offset), pack.arena);
@@ -740,11 +743,12 @@ const PackValueBits = struct {
         if (need_bits == 0) return .{ &.{}, 0 };
 
         const pt = pack.pt;
+        const zcu = pt.zcu;
 
         var bits: u64 = 0;
         var len: usize = 0;
         while (bits < pack.bit_offset + need_bits) {
-            bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(pt);
+            bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(zcu);
             len += 1;
         }
@@ -757,7 +761,7 @@ const PackValueBits = struct {
             pack.bit_offset = 0;
         } else {
             pack.unpacked = pack.unpacked[len - 1 ..];
-            pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(pt) - extra_bits;
+            pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(zcu) - extra_bits;
         }
 
         return .{ result_vals, result_offset };
diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig
index 8f0b8b1b17..8549e32d2b 100644
--- a/src/Sema/comptime_ptr_access.zig
+++ b/src/Sema/comptime_ptr_access.zig
@@ -13,14 +13,15 @@ pub const ComptimeLoadResult = union(enum) {
 pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult {
     const pt = sema.pt;
+    const zcu = pt.zcu;
     const ptr_info = ptr.typeOf(pt.zcu).ptrInfo(pt.zcu);
     // TODO: host size for vectors is terrible
     const host_bits = switch (ptr_info.flags.vector_index) {
         .none => ptr_info.packed_offset.host_size * 8,
-        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
+        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
     };
     const bit_offset = if (host_bits != 0) bit_offset: {
-        const child_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
+        const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
         const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
             .none => 0,
             .runtime => return .runtime_load,
@@ -67,18 +68,18 @@ pub fn storeComptimePtr(
     // TODO: host size for vectors is terrible
     const host_bits = switch (ptr_info.flags.vector_index) {
         .none => ptr_info.packed_offset.host_size * 8,
-        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
+        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
     };
     const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
         .runtime => return .runtime_store,
         else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
-            .little => Type.fromInterned(ptr_info.child).bitSize(pt) * @intFromEnum(idx),
-            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(pt) * (@intFromEnum(idx) + 1), // element order reversed on big endian
+            .little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
+            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian
         },
     };
     const pseudo_store_ty = if (host_bits > 0) t: {
-        const need_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
+        const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
         if (need_bits + bit_offset > host_bits) {
             return .exceeds_host_size;
         }
@@ -166,9 +167,9 @@ pub fn storeComptimePtr(
         .direct => |direct| .{ direct.val, 0 },
         .index => |index| .{
             index.val,
-            index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(pt),
+            index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu),
         },
-        .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(pt) },
+        .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) },
         .reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset },
         else => unreachable,
     };
@@ -347,8 +348,8 @@ fn loadComptimePtrInner(
         const load_one_ty, const load_count = load_ty.arrayBase(zcu);
         const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
-            if (try sema.typeRequiresComptime(load_one_ty)) break :restructure_array;
-            const elem_len = try sema.typeAbiSize(load_one_ty);
+            if (try load_one_ty.comptimeOnlySema(pt)) break :restructure_array;
+            const elem_len = try load_one_ty.abiSizeSema(pt);
             if (ptr.byte_offset % elem_len != 0) break :restructure_array;
             break :idx @divExact(ptr.byte_offset, elem_len);
         };
@@ -394,12 +395,12 @@ fn loadComptimePtrInner(
     var cur_offset = ptr.byte_offset;
 
     if (load_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
-        cur_offset += try sema.typeAbiSize(load_ty.childType(zcu)) * array_offset;
+        cur_offset += try load_ty.childType(zcu).abiSizeSema(pt) * array_offset;
     }
 
-    const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try sema.typeAbiSize(load_ty);
+    const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try load_ty.abiSizeSema(pt);
 
-    if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
+    if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
         return .{ .out_of_bounds = cur_val.typeOf(zcu) };
     }
@@ -434,7 +435,7 @@ fn loadComptimePtrInner(
             .Optional => break, // this can only be a pointer-like optional so is terminal
             .Array => {
                 const elem_ty = cur_ty.childType(zcu);
-                const elem_size = try sema.typeAbiSize(elem_ty);
+                const elem_size = try elem_ty.abiSizeSema(pt);
                 const elem_idx = cur_offset / elem_size;
                 const next_elem_off = elem_size * (elem_idx + 1);
                 if (cur_offset + need_bytes <= next_elem_off) {
@@ -449,8 +450,8 @@ fn loadComptimePtrInner(
                 .auto => unreachable, // ill-defined layout
                 .@"packed" => break, // let the bitcast logic handle this
                 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-                    const start_off = cur_ty.structFieldOffset(field_idx, pt);
-                    const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
+                    const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+                    const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt);
                     if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
                         cur_val = try cur_val.getElem(sema.pt, field_idx);
                         cur_offset -= start_off;
@@ -477,7 +478,7 @@ fn loadComptimePtrInner(
                 };
                 // The payload always has offset 0. If it's big enough
                 // to represent the whole load type, we can use it.
-                if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
+                if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
                     cur_val = payload;
                 } else {
                     break;
@@ -746,8 +747,8 @@ fn prepareComptimePtrStore(
         const store_one_ty, const store_count = store_ty.arrayBase(zcu);
         const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
-            if (try sema.typeRequiresComptime(store_one_ty)) break :restructure_array;
-            const elem_len = try sema.typeAbiSize(store_one_ty);
+            if (try store_one_ty.comptimeOnlySema(pt)) break :restructure_array;
+            const elem_len = try store_one_ty.abiSizeSema(pt);
             if (ptr.byte_offset % elem_len != 0) break :restructure_array;
             break :idx @divExact(ptr.byte_offset, elem_len);
         };
@@ -800,11 +801,11 @@ fn prepareComptimePtrStore(
     var cur_val: *MutableValue, var cur_offset: u64 = switch (base_strat) {
         .direct => |direct| .{ direct.val, 0 },
         // It's okay to do `abiSize` - the comptime-only case will be caught below.
-        .index => |index| .{ index.val, index.elem_index * try sema.typeAbiSize(index.val.typeOf(zcu).childType(zcu)) },
+        .index => |index| .{ index.val, index.elem_index * try index.val.typeOf(zcu).childType(zcu).abiSizeSema(pt) },
         .flat_index => |flat_index| .{ flat_index.val,
             // It's okay to do `abiSize` - the comptime-only case will be caught below.
-            flat_index.flat_elem_index * try sema.typeAbiSize(flat_index.val.typeOf(zcu).arrayBase(zcu)[0]),
+            flat_index.flat_elem_index * try flat_index.val.typeOf(zcu).arrayBase(zcu)[0].abiSizeSema(pt),
         },
         .reinterpret => |r| .{ r.val, r.byte_offset },
         else => unreachable,
     };
@@ -816,12 +817,12 @@ fn prepareComptimePtrStore(
     }
 
     if (store_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
-        cur_offset += try sema.typeAbiSize(store_ty.childType(zcu)) * array_offset;
+        cur_offset += try store_ty.childType(zcu).abiSizeSema(pt) * array_offset;
    }
 
-    const need_bytes = try sema.typeAbiSize(store_ty);
+    const need_bytes = try store_ty.abiSizeSema(pt);
 
-    if (cur_offset + need_bytes > try sema.typeAbiSize(cur_val.typeOf(zcu))) {
+    if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
         return .{ .out_of_bounds = cur_val.typeOf(zcu) };
     }
@@ -856,7 +857,7 @@ fn prepareComptimePtrStore(
             .Optional => break, // this can only be a pointer-like optional so is terminal
             .Array => {
                 const elem_ty = cur_ty.childType(zcu);
-                const elem_size = try sema.typeAbiSize(elem_ty);
+                const elem_size = try elem_ty.abiSizeSema(pt);
                 const elem_idx = cur_offset / elem_size;
                 const next_elem_off = elem_size * (elem_idx + 1);
                 if (cur_offset + need_bytes <= next_elem_off) {
@@ -871,8 +872,8 @@ fn prepareComptimePtrStore(
                 .auto => unreachable, // ill-defined layout
                 .@"packed" => break, // let the bitcast logic handle this
                 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-                    const start_off = cur_ty.structFieldOffset(field_idx, pt);
-                    const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
+                    const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+                    const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt);
                     if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
                         cur_val = try cur_val.elem(pt, sema.arena, field_idx);
                         cur_offset -= start_off;
@@ -895,7 +896,7 @@ fn prepareComptimePtrStore(
                 };
                 // The payload always has offset 0. If it's big enough
                 // to represent the whole load type, we can use it.
-                if (try sema.typeAbiSize(payload.typeOf(zcu)) >= need_bytes) {
+                if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
                     cur_val = payload;
                 } else {
                     break;
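As context for the endian arithmetic in the first bitcast.zig hunk: on big-endian targets the bits to skip are counted from the top of the ABI-sized slot rather than the bottom. A minimal standalone sketch of that computation (illustrative only; skipBits and the test values are not from the compiler source):

const std = @import("std");

/// Illustrative only: mirrors the `skip_bits` switch in `bitCastInner` above.
/// `abi_bits` stands in for `val_ty.abiSize(zcu) * 8`, and `dest_bits`
/// for `dest_ty.bitSize(zcu)`.
fn skipBits(endian: std.builtin.Endian, abi_bits: u64, byte_offset: u64, bit_offset: u64, host_bits: u64, dest_bits: u64) u64 {
    return switch (endian) {
        .little => bit_offset + byte_offset * 8,
        .big => if (host_bits > 0)
            abi_bits - byte_offset * 8 - host_bits + bit_offset
        else
            abi_bits - byte_offset * 8 - dest_bits,
    };
}

test "big-endian bit offsets count from the top of the ABI slot" {
    // Reading a u8 at byte 1 of a 4-byte value, with no bit-pointer packing:
    // little-endian skips the low 8 bits; big-endian skips 16 from the top.
    try std.testing.expectEqual(@as(u64, 8), skipBits(.little, 32, 1, 0, 0, 8));
    try std.testing.expectEqual(@as(u64, 16), skipBits(.big, 32, 1, 0, 0, 8));
}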
