| | | |
|---|---|---|
| author | Jacob Young <jacobly0@users.noreply.github.com> | 2024-06-15 16:10:53 -0400 |
| committer | Jacob Young <jacobly0@users.noreply.github.com> | 2024-07-07 22:59:52 -0400 |
| commit | 525f341f33af9b8aad53931fd5511f00a82cb090 | |
| tree | cec3280498c1122858580946ac5e31f8feb807ce (/src/Sema) | |
| parent | 8f20e81b8816aadd8ceb1b04bd3727cc1d124464 | |
Zcu: introduce `PerThread` and pass to all the functions
Diffstat (limited to 'src/Sema')

| | file | lines |
|---|---|---|
| -rw-r--r-- | src/Sema/bitcast.zig | 188 |
| -rw-r--r-- | src/Sema/comptime_ptr_access.zig | 111 |

2 files changed, 153 insertions(+), 146 deletions(-)
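For context on the pattern this commit applies, here is a minimal sketch with simplified stand-in types, not the compiler's real definitions (the `generation` field, the `tid` field, and the `bitSizeBefore`/`bitSizeAfter` helpers are illustrative assumptions). Code that previously received the shared compilation unit as `zcu: *Zcu` (via `sema.mod`) now receives a small `Zcu.PerThread` value, conventionally named `pt`; the shared state stays reachable as `pt.zcu`, while size, layout, and interning queries move onto the handle:

```zig
const std = @import("std");

// Stand-in for the compiler's shared compilation-unit state.
const Zcu = struct {
    // Illustrative placeholder for shared state (intern pool, target info, ...).
    generation: u32 = 0,

    // The handle introduced by this commit: the shared `*Zcu` plus an
    // identifier for the thread doing analysis, passed by value.
    // (The `tid` field is an assumption for this sketch; the diff only
    // shows that `pt.zcu` recovers the shared state.)
    pub const PerThread = struct {
        zcu: *Zcu,
        tid: usize,
    };
};

// Before: queries took the shared state directly.
fn bitSizeBefore(zcu: *Zcu) u64 {
    _ = zcu; // would consult shared layout/intern data
    return 32; // placeholder result
}

// After: the same queries take `Zcu.PerThread`, so they can address
// per-thread resources; purely shared lookups go through `pt.zcu`.
fn bitSizeAfter(pt: Zcu.PerThread) u64 {
    const zcu = pt.zcu; // shared, thread-agnostic state is still reachable
    _ = zcu;
    return 32; // placeholder result
}

pub fn main() void {
    var zcu: Zcu = .{};
    zcu.generation += 1; // mutate the shared state to keep the example honest
    const pt: Zcu.PerThread = .{ .zcu = &zcu, .tid = 0 };
    std.debug.print("before: {d}, after: {d}\n", .{ bitSizeBefore(pt.zcu), bitSizeAfter(pt) });
}
```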
diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig
index 3c3ccdbfaa..c5155dec63 100644
--- a/src/Sema/bitcast.zig
+++ b/src/Sema/bitcast.zig
@@ -69,7 +69,8 @@ fn bitCastInner(
     host_bits: u64,
     bit_offset: u64,
 ) BitCastError!Value {
-    const zcu = sema.mod;
+    const pt = sema.pt;
+    const zcu = pt.zcu;
     const endian = zcu.getTarget().cpu.arch.endian();
 
     if (dest_ty.toIntern() == val.typeOf(zcu).toIntern() and bit_offset == 0) {
@@ -78,29 +79,29 @@ fn bitCastInner(
     const val_ty = val.typeOf(zcu);
 
-    try val_ty.resolveLayout(zcu);
-    try dest_ty.resolveLayout(zcu);
+    try val_ty.resolveLayout(pt);
+    try dest_ty.resolveLayout(pt);
 
     assert(val_ty.hasWellDefinedLayout(zcu));
 
     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
+        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
     else
-        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
+        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
 
     const skip_bits = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
+            val_ty.abiSize(pt) * 8 - byte_offset * 8 - dest_ty.bitSize(pt),
     };
 
     var unpack: UnpackValueBits = .{
-        .zcu = zcu,
+        .pt = sema.pt,
         .arena = sema.arena,
         .skip_bits = skip_bits,
-        .remaining_bits = dest_ty.bitSize(zcu),
+        .remaining_bits = dest_ty.bitSize(pt),
         .unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
     };
     switch (endian) {
@@ -116,7 +117,7 @@ fn bitCastInner(
     try unpack.padding(host_pad_bits);
 
     var pack: PackValueBits = .{
-        .zcu = zcu,
+        .pt = sema.pt,
         .arena = sema.arena,
         .unpacked = unpack.unpacked.items,
     };
@@ -131,33 +132,34 @@ fn bitCastSpliceInner(
     host_bits: u64,
     bit_offset: u64,
 ) BitCastError!Value {
-    const zcu = sema.mod;
+    const pt = sema.pt;
+    const zcu = pt.zcu;
     const endian = zcu.getTarget().cpu.arch.endian();
     const val_ty = val.typeOf(zcu);
     const splice_val_ty = splice_val.typeOf(zcu);
 
-    try val_ty.resolveLayout(zcu);
-    try splice_val_ty.resolveLayout(zcu);
+    try val_ty.resolveLayout(pt);
+    try splice_val_ty.resolveLayout(pt);
 
-    const splice_bits = splice_val_ty.bitSize(zcu);
+    const splice_bits = splice_val_ty.bitSize(pt);
 
     const splice_offset = switch (endian) {
         .little => bit_offset + byte_offset * 8,
         .big => if (host_bits > 0)
-            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
+            val_ty.abiSize(pt) * 8 - byte_offset * 8 - host_bits + bit_offset
         else
-            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
+            val_ty.abiSize(pt) * 8 - byte_offset * 8 - splice_bits,
     };
 
-    assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);
+    assert(splice_offset + splice_bits <= val_ty.abiSize(pt) * 8);
 
     const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
-        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
+        .{ val_ty.abiSize(pt) * 8 - host_bits, host_bits - val_ty.bitSize(pt) }
     else
-        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };
+        .{ val_ty.abiSize(pt) * 8 - val_ty.bitSize(pt), 0 };
 
     var unpack: UnpackValueBits = .{
-        .zcu = zcu,
+        .pt = pt,
         .arena = sema.arena,
         .skip_bits = 0,
         .remaining_bits = splice_offset,
@@ -179,7 +181,7 @@ fn bitCastSpliceInner(
     try unpack.add(splice_val);
 
     unpack.skip_bits = splice_offset + splice_bits;
-    unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
+    unpack.remaining_bits = val_ty.abiSize(pt) * 8 - splice_offset - splice_bits;
     switch (endian) {
         .little => {
             try unpack.add(val);
@@ -193,7 +195,7 @@ fn bitCastSpliceInner(
     try unpack.padding(host_pad_bits);
 
     var pack: PackValueBits = .{
-        .zcu = zcu,
+        .pt = pt,
         .arena = sema.arena,
         .unpacked = unpack.unpacked.items,
     };
@@ -209,7 +211,7 @@
 /// of values in *packed* memory - therefore, on big-endian targets, the first element of this
 /// list contains bits from the *final* byte of the value.
 const UnpackValueBits = struct {
-    zcu: *Zcu,
+    pt: Zcu.PerThread,
     arena: Allocator,
     skip_bits: u64,
     remaining_bits: u64,
@@ -217,7 +219,8 @@ const UnpackValueBits = struct {
     unpacked: std.ArrayList(InternPool.Index),
 
     fn add(unpack: *UnpackValueBits, val: Value) BitCastError!void {
-        const zcu = unpack.zcu;
+        const pt = unpack.pt;
+        const zcu = pt.zcu;
         const endian = zcu.getTarget().cpu.arch.endian();
         const ip = &zcu.intern_pool;
 
@@ -226,7 +229,7 @@ const UnpackValueBits = struct {
         }
 
         const ty = val.typeOf(zcu);
-        const bit_size = ty.bitSize(zcu);
+        const bit_size = ty.bitSize(pt);
 
         if (unpack.skip_bits >= bit_size) {
             unpack.skip_bits -= bit_size;
@@ -279,7 +282,7 @@ const UnpackValueBits = struct {
                         .little => i,
                         .big => len - i - 1,
                     };
-                    const elem_val = try val.elemValue(zcu, real_idx);
+                    const elem_val = try val.elemValue(pt, real_idx);
                     try unpack.add(elem_val);
                 }
             },
@@ -288,7 +291,7 @@ const UnpackValueBits = struct {
                 // The final element does not have trailing padding.
                 // Elements are reversed in packed memory on BE targets.
                 const elem_ty = ty.childType(zcu);
-                const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
+                const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
                 const len = ty.arrayLen(zcu);
                 const maybe_sent = ty.sentinel(zcu);
 
@@ -303,7 +306,7 @@ const UnpackValueBits = struct {
                         .little => i,
                         .big => len - i - 1,
                     };
-                    const elem_val = try val.elemValue(zcu, @intCast(real_idx));
+                    const elem_val = try val.elemValue(pt, @intCast(real_idx));
                     try unpack.add(elem_val);
                     if (i != len - 1) try unpack.padding(pad_bits);
                 }
@@ -320,12 +323,12 @@ const UnpackValueBits = struct {
                     var cur_bit_off: u64 = 0;
                     var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
                     while (it.next()) |field_idx| {
-                        const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
+                        const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
                         const pad_bits = want_bit_off - cur_bit_off;
-                        const field_val = try val.fieldValue(zcu, field_idx);
+                        const field_val = try val.fieldValue(pt, field_idx);
                         try unpack.padding(pad_bits);
                         try unpack.add(field_val);
-                        cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
+                        cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(pt);
                     }
                     // Add trailing padding bits.
                     try unpack.padding(bit_size - cur_bit_off);
@@ -334,13 +337,13 @@ const UnpackValueBits = struct {
                    var cur_bit_off: u64 = bit_size;
                     var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
                     while (it.next()) |field_idx| {
-                        const field_val = try val.fieldValue(zcu, field_idx);
+                        const field_val = try val.fieldValue(pt, field_idx);
                         const field_ty = field_val.typeOf(zcu);
-                        const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
+                        const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
                         const pad_bits = cur_bit_off - want_bit_off;
                         try unpack.padding(pad_bits);
                         try unpack.add(field_val);
-                        cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
+                        cur_bit_off = want_bit_off - field_ty.bitSize(pt);
                     }
                     assert(cur_bit_off == 0);
                 },
@@ -349,7 +352,7 @@ const UnpackValueBits = struct {
                 // Just add all fields in order. There are no padding bits.
                 // This is identical between LE and BE targets.
                 for (0..ty.structFieldCount(zcu)) |i| {
-                    const field_val = try val.fieldValue(zcu, i);
+                    const field_val = try val.fieldValue(pt, i);
                     try unpack.add(field_val);
                 }
             },
@@ -363,7 +366,7 @@ const UnpackValueBits = struct {
                 // This correctly handles the case where `tag == .none`, since the payload is then
                 // either an integer or a byte array, both of which we can unpack.
                 const payload_val = Value.fromInterned(un.val);
-                const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
+                const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(pt);
                 if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
                     try unpack.add(payload_val);
                     try unpack.padding(pad_bits);
@@ -377,31 +380,31 @@ const UnpackValueBits = struct {
     fn padding(unpack: *UnpackValueBits, pad_bits: u64) BitCastError!void {
         if (pad_bits == 0) return;
-        const zcu = unpack.zcu;
+        const pt = unpack.pt;
 
         // Figure out how many full bytes and leftover bits there are.
         const bytes = pad_bits / 8;
         const bits = pad_bits % 8;
 
         // Add undef u8 values for the bytes...
-        const undef_u8 = try zcu.undefValue(Type.u8);
+        const undef_u8 = try pt.undefValue(Type.u8);
         for (0..@intCast(bytes)) |_| {
             try unpack.primitive(undef_u8);
         }
 
         // ...and an undef int for the leftover bits.
         if (bits == 0) return;
-        const bits_ty = try zcu.intType(.unsigned, @intCast(bits));
-        const bits_val = try zcu.undefValue(bits_ty);
+        const bits_ty = try pt.intType(.unsigned, @intCast(bits));
+        const bits_val = try pt.undefValue(bits_ty);
         try unpack.primitive(bits_val);
     }
 
     fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
-        const zcu = unpack.zcu;
+        const pt = unpack.pt;
 
         if (unpack.remaining_bits == 0) {
             return;
         }
 
-        const ty = val.typeOf(zcu);
-        const bit_size = ty.bitSize(zcu);
+        const ty = val.typeOf(pt.zcu);
+        const bit_size = ty.bitSize(pt);
 
         // Note that this skips all zero-bit types.
         if (unpack.skip_bits >= bit_size) {
@@ -425,21 +428,21 @@ const UnpackValueBits = struct {
     }
 
     fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
-        const zcu = unpack.zcu;
-        const ty = val.typeOf(zcu);
+        const pt = unpack.pt;
+        const ty = val.typeOf(pt.zcu);
 
-        const val_bits = ty.bitSize(zcu);
+        const val_bits = ty.bitSize(pt);
 
         assert(bit_offset + bit_count <= val_bits);
 
-        switch (zcu.intern_pool.indexToKey(val.toIntern())) {
+        switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
             // In the `ptr` case, this will return `error.ReinterpretDeclRef`
             // if we're trying to split a non-integer pointer value.
             .int, .float, .enum_tag, .ptr, .opt => {
                 // This @intCast is okay because no primitive can exceed the size of a u16.
-                const int_ty = try zcu.intType(.unsigned, @intCast(bit_count));
+                const int_ty = try unpack.pt.intType(.unsigned, @intCast(bit_count));
                 const buf = try unpack.arena.alloc(u8, @intCast((val_bits + 7) / 8));
-                try val.writeToPackedMemory(ty, zcu, buf, 0);
-                const sub_val = try Value.readFromPackedMemory(int_ty, zcu, buf, @intCast(bit_offset), unpack.arena);
+                try val.writeToPackedMemory(ty, unpack.pt, buf, 0);
+                const sub_val = try Value.readFromPackedMemory(int_ty, unpack.pt, buf, @intCast(bit_offset), unpack.arena);
                 try unpack.primitive(sub_val);
             },
             .undef => try unpack.padding(bit_count),
@@ -456,13 +459,14 @@
 /// reconstructs a value of an arbitrary type, with correct handling of `undefined`
 /// values and of pointers which align in virtual memory.
 const PackValueBits = struct {
-    zcu: *Zcu,
+    pt: Zcu.PerThread,
     arena: Allocator,
     bit_offset: u64 = 0,
     unpacked: []const InternPool.Index,
 
     fn get(pack: *PackValueBits, ty: Type) BitCastError!Value {
-        const zcu = pack.zcu;
+        const pt = pack.pt;
+        const zcu = pt.zcu;
         const endian = zcu.getTarget().cpu.arch.endian();
         const ip = &zcu.intern_pool;
         const arena = pack.arena;
@@ -485,7 +489,7 @@ const PackValueBits = struct {
                     }
                 },
             }
-            return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+            return Value.fromInterned(try pt.intern(.{ .aggregate = .{
                 .ty = ty.toIntern(),
                 .storage = .{ .elems = elems },
             } }));
@@ -495,12 +499,12 @@ const PackValueBits = struct {
            const len = ty.arrayLen(zcu);
             const elem_ty = ty.childType(zcu);
             const maybe_sent = ty.sentinel(zcu);
-            const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
+            const pad_bits = elem_ty.abiSize(pt) * 8 - elem_ty.bitSize(pt);
             const elems = try arena.alloc(InternPool.Index, @intCast(len));
 
             if (endian == .big and maybe_sent != null) {
                 // TODO: validate sentinel was preserved!
-                try pack.padding(elem_ty.bitSize(zcu));
+                try pack.padding(elem_ty.bitSize(pt));
                 if (len != 0) try pack.padding(pad_bits);
             }
 
@@ -516,10 +520,10 @@ const PackValueBits = struct {
             if (endian == .little and maybe_sent != null) {
                 // TODO: validate sentinel was preserved!
                 if (len != 0) try pack.padding(pad_bits);
-                try pack.padding(elem_ty.bitSize(zcu));
+                try pack.padding(elem_ty.bitSize(pt));
             }
 
-            return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+            return Value.fromInterned(try pt.intern(.{ .aggregate = .{
                 .ty = ty.toIntern(),
                 .storage = .{ .elems = elems },
             } }));
@@ -534,23 +538,23 @@ const PackValueBits = struct {
                     var cur_bit_off: u64 = 0;
                     var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
                     while (it.next()) |field_idx| {
-                        const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
+                        const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8;
                         try pack.padding(want_bit_off - cur_bit_off);
                         const field_ty = ty.structFieldType(field_idx, zcu);
                         elems[field_idx] = (try pack.get(field_ty)).toIntern();
-                        cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
+                        cur_bit_off = want_bit_off + field_ty.bitSize(pt);
                     }
-                    try pack.padding(ty.bitSize(zcu) - cur_bit_off);
+                    try pack.padding(ty.bitSize(pt) - cur_bit_off);
                 },
                 .big => {
-                    var cur_bit_off: u64 = ty.bitSize(zcu);
+                    var cur_bit_off: u64 = ty.bitSize(pt);
                     var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
                     while (it.next()) |field_idx| {
                         const field_ty = ty.structFieldType(field_idx, zcu);
-                        const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
+                        const want_bit_off = ty.structFieldOffset(field_idx, pt) * 8 + field_ty.bitSize(pt);
                         try pack.padding(cur_bit_off - want_bit_off);
                         elems[field_idx] = (try pack.get(field_ty)).toIntern();
-                        cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
+                        cur_bit_off = want_bit_off - field_ty.bitSize(pt);
                     }
                     assert(cur_bit_off == 0);
                 },
@@ -559,10 +563,10 @@ const PackValueBits = struct {
             // Fill those values now.
             for (elems, 0..) |*elem, field_idx| {
                 if (elem.* != .none) continue;
-                const val = (try ty.structFieldValueComptime(zcu, field_idx)).?;
+                const val = (try ty.structFieldValueComptime(pt, field_idx)).?;
                 elem.* = val.toIntern();
             }
-            return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+            return Value.fromInterned(try pt.intern(.{ .aggregate = .{
                 .ty = ty.toIntern(),
                 .storage = .{ .elems = elems },
             } }));
@@ -575,7 +579,7 @@ const PackValueBits = struct {
                 const field_ty = ty.structFieldType(i, zcu);
                 elem.* = (try pack.get(field_ty)).toIntern();
             }
-            return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+            return Value.fromInterned(try pt.intern(.{ .aggregate = .{
                 .ty = ty.toIntern(),
                 .storage = .{ .elems = elems },
             } }));
@@ -591,7 +595,7 @@ const PackValueBits = struct {
             const prev_unpacked = pack.unpacked;
             const prev_bit_offset = pack.bit_offset;
 
-            const backing_ty = try ty.unionBackingType(zcu);
+            const backing_ty = try ty.unionBackingType(pt);
 
             backing: {
                 const backing_val = pack.get(backing_ty) catch |err| switch (err) {
@@ -607,7 +611,7 @@ const PackValueBits = struct {
                     pack.bit_offset = prev_bit_offset;
                     break :backing;
                 }
-                return Value.fromInterned(try zcu.intern(.{ .un = .{
+                return Value.fromInterned(try pt.intern(.{ .un = .{
                     .ty = ty.toIntern(),
                     .tag = .none,
                     .val = backing_val.toIntern(),
@@ -618,16 +622,16 @@ const PackValueBits = struct {
             for (field_order, 0..) |*f, i| f.* = @intCast(i);
             // Sort `field_order` to put the fields with the largest bit sizes first.
             const SizeSortCtx = struct {
-                zcu: *Zcu,
+                pt: Zcu.PerThread,
                 field_types: []const InternPool.Index,
                 fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
                     const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
                     const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
-                    return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
+                    return a_ty.bitSize(ctx.pt) > b_ty.bitSize(ctx.pt);
                 }
             };
 
             std.mem.sortUnstable(u32, field_order, SizeSortCtx{
-                .zcu = zcu,
+                .pt = pt,
                 .field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
             }, SizeSortCtx.lessThan);
@@ -635,7 +639,7 @@ const PackValueBits = struct {
             for (field_order) |field_idx| {
                 const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
-                const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
+                const pad_bits = ty.bitSize(pt) - field_ty.bitSize(pt);
                 if (!padding_after) try pack.padding(pad_bits);
                 const field_val = pack.get(field_ty) catch |err| switch (err) {
                     error.ReinterpretDeclRef => {
@@ -651,8 +655,8 @@ const PackValueBits = struct {
                     pack.bit_offset = prev_bit_offset;
                     continue;
                 }
-                const tag_val = try zcu.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
-                return Value.fromInterned(try zcu.intern(.{ .un = .{
+                const tag_val = try pt.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
+                return Value.fromInterned(try pt.intern(.{ .un = .{
                     .ty = ty.toIntern(),
                     .tag = tag_val.toIntern(),
                     .val = field_val.toIntern(),
@@ -662,7 +666,7 @@ const PackValueBits = struct {
             // No field could represent the value. Just do whatever happens when we try to read
             // the backing type - either `undefined` or `error.ReinterpretDeclRef`.
             const backing_val = try pack.get(backing_ty);
-            return Value.fromInterned(try zcu.intern(.{ .un = .{
+            return Value.fromInterned(try pt.intern(.{ .un = .{
                 .ty = ty.toIntern(),
                 .tag = .none,
                 .val = backing_val.toIntern(),
@@ -677,14 +681,14 @@ const PackValueBits = struct {
     }
 
     fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
-        const zcu = pack.zcu;
-        const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
+        const pt = pack.pt;
+        const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(pt));
 
         for (vals) |val| {
-            if (!Value.fromInterned(val).isUndef(zcu)) break;
+            if (!Value.fromInterned(val).isUndef(pt.zcu)) break;
         } else {
             // All bits of the value are `undefined`.
-            return zcu.undefValue(want_ty);
+            return pt.undefValue(want_ty);
        }
 
         // TODO: we need to decide how to handle partially-undef values here.
@@ -702,9 +706,9 @@ const PackValueBits = struct {
         ptr_cast: {
             if (vals.len != 1) break :ptr_cast;
             const val = Value.fromInterned(vals[0]);
-            if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
-            if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
-            return zcu.getCoerced(val, want_ty);
+            if (!val.typeOf(pt.zcu).isPtrAtRuntime(pt.zcu)) break :ptr_cast;
+            if (!want_ty.isPtrAtRuntime(pt.zcu)) break :ptr_cast;
+            return pt.getCoerced(val, want_ty);
         }
 
         // Reinterpret via an in-memory buffer.
@@ -712,8 +716,8 @@ const PackValueBits = struct {
         var buf_bits: u64 = 0;
         for (vals) |ip_val| {
             const val = Value.fromInterned(ip_val);
-            const ty = val.typeOf(zcu);
-            buf_bits += ty.bitSize(zcu);
+            const ty = val.typeOf(pt.zcu);
+            buf_bits += ty.bitSize(pt);
         }
 
         const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
@@ -722,25 +726,25 @@ const PackValueBits = struct {
         var cur_bit_off: usize = 0;
         for (vals) |ip_val| {
             const val = Value.fromInterned(ip_val);
-            const ty = val.typeOf(zcu);
-            if (!val.isUndef(zcu)) {
-                try val.writeToPackedMemory(ty, zcu, buf, cur_bit_off);
+            const ty = val.typeOf(pt.zcu);
+            if (!val.isUndef(pt.zcu)) {
+                try val.writeToPackedMemory(ty, pt, buf, cur_bit_off);
             }
-            cur_bit_off += @intCast(ty.bitSize(zcu));
+            cur_bit_off += @intCast(ty.bitSize(pt));
         }
 
-        return Value.readFromPackedMemory(want_ty, zcu, buf, @intCast(bit_offset), pack.arena);
+        return Value.readFromPackedMemory(want_ty, pt, buf, @intCast(bit_offset), pack.arena);
     }
 
     fn prepareBits(pack: *PackValueBits, need_bits: u64) struct { []const InternPool.Index, u64 } {
         if (need_bits == 0) return .{ &.{}, 0 };
 
-        const zcu = pack.zcu;
+        const pt = pack.pt;
 
         var bits: u64 = 0;
         var len: usize = 0;
         while (bits < pack.bit_offset + need_bits) {
-            bits += Value.fromInterned(pack.unpacked[len]).typeOf(zcu).bitSize(zcu);
+            bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(pt);
            len += 1;
         }
@@ -753,7 +757,7 @@ const PackValueBits = struct {
             pack.bit_offset = 0;
         } else {
             pack.unpacked = pack.unpacked[len - 1 ..];
-            pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(zcu).bitSize(zcu) - extra_bits;
+            pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(pt) - extra_bits;
         }
 
         return .{ result_vals, result_offset };
diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig
index d8e638ca26..79e39cabfe 100644
--- a/src/Sema/comptime_ptr_access.zig
+++ b/src/Sema/comptime_ptr_access.zig
@@ -12,19 +12,19 @@ pub const ComptimeLoadResult = union(enum) {
 };
 
 pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult {
-    const zcu = sema.mod;
-    const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
+    const pt = sema.pt;
+    const ptr_info = ptr.typeOf(pt.zcu).ptrInfo(pt.zcu);
     // TODO: host size for vectors is terrible
     const host_bits = switch (ptr_info.flags.vector_index) {
         .none => ptr_info.packed_offset.host_size * 8,
-        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
+        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
     };
     const bit_offset = if (host_bits != 0) bit_offset: {
-        const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
+        const child_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
         const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
             .none => 0,
             .runtime => return .runtime_load,
-            else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
+            else => |idx| switch (pt.zcu.getTarget().cpu.arch.endian()) {
                 .little => child_bits * @intFromEnum(idx),
                 .big => host_bits - child_bits * (@intFromEnum(idx) + 1), // element order reversed on big endian
             },
@@ -60,28 +60,29 @@ pub fn storeComptimePtr(
     ptr: Value,
     store_val: Value,
 ) !ComptimeStoreResult {
-    const zcu = sema.mod;
+    const pt = sema.pt;
+    const zcu = pt.zcu;
     const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
     assert(store_val.typeOf(zcu).toIntern() == ptr_info.child);
     // TODO: host size for vectors is terrible
     const host_bits = switch (ptr_info.flags.vector_index) {
         .none => ptr_info.packed_offset.host_size * 8,
-        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
+        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(pt),
     };
     const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
         .runtime => return .runtime_store,
         else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
-            .little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
-            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian
+            .little => Type.fromInterned(ptr_info.child).bitSize(pt) * @intFromEnum(idx),
+            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(pt) * (@intFromEnum(idx) + 1), // element order reversed on big endian
         },
     };
 
     const pseudo_store_ty = if (host_bits > 0) t: {
-        const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
+        const need_bits = Type.fromInterned(ptr_info.child).bitSize(pt);
         if (need_bits + bit_offset > host_bits) {
             return .exceeds_host_size;
         }
-        break :t try zcu.intType(.unsigned, @intCast(host_bits));
+        break :t try sema.pt.intType(.unsigned, @intCast(host_bits));
     } else Type.fromInterned(ptr_info.child);
 
     const strat = try prepareComptimePtrStore(sema, block, src, ptr, pseudo_store_ty, 0);
@@ -103,7 +104,7 @@ pub fn storeComptimePtr(
             .needed_well_defined => |ty| return .{ .needed_well_defined = ty },
             .out_of_bounds => |ty| return .{ .out_of_bounds = ty },
         };
-        const expected = try expected_mv.intern(zcu, sema.arena);
+        const expected = try expected_mv.intern(pt, sema.arena);
         if (store_val.toIntern() != expected.toIntern()) {
             return .{ .comptime_field_mismatch = expected };
         }
@@ -126,14 +127,14 @@ pub fn storeComptimePtr(
     switch (strat) {
         .direct => |direct| {
             const want_ty = direct.val.typeOf(zcu);
-            const coerced_store_val = try zcu.getCoerced(store_val, want_ty);
+            const coerced_store_val = try pt.getCoerced(store_val, want_ty);
             direct.val.* = .{ .interned = coerced_store_val.toIntern() };
             return .success;
         },
         .index => |index| {
             const want_ty = index.val.typeOf(zcu).childType(zcu);
-            const coerced_store_val = try zcu.getCoerced(store_val, want_ty);
-            try index.val.setElem(zcu, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() });
+            const coerced_store_val = try pt.getCoerced(store_val, want_ty);
+            try index.val.setElem(pt, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() });
             return .success;
         },
         .flat_index => |flat| {
@@ -149,7 +150,7 @@ pub fn storeComptimePtr(
                 // Better would be to gather all the store targets into an array.
                 var index: u64 = flat.flat_elem_index + idx;
                 const val_ptr, const final_idx = (try recursiveIndex(sema, flat.val, &index)).?;
-                try val_ptr.setElem(zcu, sema.arena, @intCast(final_idx), .{ .interned = elem });
+                try val_ptr.setElem(pt, sema.arena, @intCast(final_idx), .{ .interned = elem });
             }
             return .success;
         },
@@ -165,9 +166,9 @@ pub fn storeComptimePtr(
         .direct => |direct| .{ direct.val, 0 },
         .index => |index| .{
             index.val,
-            index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu),
+            index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(pt),
         },
-        .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) },
+        .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(pt) },
         .reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset },
         else => unreachable,
     };
@@ -181,7 +182,7 @@ pub fn storeComptimePtr(
     }
 
     const new_val = try sema.bitCastSpliceVal(
-        try val_ptr.intern(zcu, sema.arena),
+        try val_ptr.intern(pt, sema.arena),
         store_val,
         byte_offset,
         host_bits,
@@ -205,7 +206,8 @@ fn loadComptimePtrInner(
     /// before `load_ty`. Otherwise, it is ignored and may be `undefined`.
     array_offset: u64,
 ) !ComptimeLoadResult {
-    const zcu = sema.mod;
+    const pt = sema.pt;
+    const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
 
     const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
@@ -263,7 +265,7 @@ fn loadComptimePtrInner(
             const load_one_ty, const load_count = load_ty.arrayBase(zcu);
             const count = if (load_one_ty.toIntern() == base_ty.toIntern()) load_count else 1;
 
-            const want_ty = try zcu.arrayType(.{
+            const want_ty = try sema.pt.arrayType(.{
                 .len = count,
                 .child = base_ty.toIntern(),
             });
@@ -285,7 +287,7 @@ fn loadComptimePtrInner(
             const agg_ty = agg_val.typeOf(zcu);
             switch (agg_ty.zigTypeTag(zcu)) {
-                .Struct, .Pointer => break :val try agg_val.getElem(zcu, @intCast(base_index.index)),
+                .Struct, .Pointer => break :val try agg_val.getElem(sema.pt, @intCast(base_index.index)),
                 .Union => {
                     const tag_val: Value, const payload_mv: MutableValue = switch (agg_val) {
                         .un => |un| .{ Value.fromInterned(un.tag), un.payload.* },
@@ -427,7 +429,7 @@ fn loadComptimePtrInner(
                 const next_elem_off = elem_size * (elem_idx + 1);
                 if (cur_offset + need_bytes <= next_elem_off) {
                     // We can look at a single array element.
-                    cur_val = try cur_val.getElem(zcu, @intCast(elem_idx));
+                    cur_val = try cur_val.getElem(sema.pt, @intCast(elem_idx));
                     cur_offset -= elem_idx * elem_size;
                 } else {
                     break;
@@ -437,10 +439,10 @@ fn loadComptimePtrInner(
                 .auto => unreachable, // ill-defined layout
                 .@"packed" => break, // let the bitcast logic handle this
                 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-                    const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+                    const start_off = cur_ty.structFieldOffset(field_idx, pt);
                     const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
                     if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
-                        cur_val = try cur_val.getElem(zcu, field_idx);
+                        cur_val = try cur_val.getElem(sema.pt, field_idx);
                         cur_offset -= start_off;
                         break;
                     }
@@ -482,7 +484,7 @@ fn loadComptimePtrInner(
     }
 
     const result_val = try sema.bitCastVal(
-        try cur_val.intern(zcu, sema.arena),
+        try cur_val.intern(sema.pt, sema.arena),
         load_ty,
         cur_offset,
         host_bits,
@@ -564,7 +566,8 @@ fn prepareComptimePtrStore(
     /// before `store_ty`. Otherwise, it is ignored and may be `undefined`.
     array_offset: u64,
 ) !ComptimeStoreStrategy {
-    const zcu = sema.mod;
+    const pt = sema.pt;
+    const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
 
     const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
@@ -587,14 +590,14 @@ fn prepareComptimePtrStore(
             const eu_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
                 .direct => |direct| .{ direct.val, direct.alloc },
                 .index => |index| .{
-                    try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
+                    try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
                     index.alloc,
                 },
                 .flat_index => unreachable, // base_ty is not an array
                 .reinterpret => unreachable, // base_ty has ill-defined layout
                 else => |err| return err,
             };
-            try eu_val_ptr.unintern(zcu, sema.arena, false, false);
+            try eu_val_ptr.unintern(pt, sema.arena, false, false);
             switch (eu_val_ptr.*) {
                 .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                     .undef => return .undef,
@@ -614,14 +617,14 @@ fn prepareComptimePtrStore(
             const opt_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
                 .direct => |direct| .{ direct.val, direct.alloc },
                 .index => |index| .{
-                    try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
+                    try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
                     index.alloc,
                 },
                 .flat_index => unreachable, // base_ty is not an array
                 .reinterpret => unreachable, // base_ty has ill-defined layout
                 else => |err| return err,
             };
-            try opt_val_ptr.unintern(zcu, sema.arena, false, false);
+            try opt_val_ptr.unintern(pt, sema.arena, false, false);
             switch (opt_val_ptr.*) {
                 .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                     .undef => return .undef,
@@ -648,7 +651,7 @@ fn prepareComptimePtrStore(
             const store_one_ty, const store_count = store_ty.arrayBase(zcu);
             const count = if (store_one_ty.toIntern() == base_ty.toIntern()) store_count else 1;
 
-            const want_ty = try zcu.arrayType(.{
+            const want_ty = try pt.arrayType(.{
                 .len = count,
                 .child = base_ty.toIntern(),
             });
@@ -668,7 +671,7 @@ fn prepareComptimePtrStore(
             const agg_val, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
                 .direct => |direct| .{ direct.val, direct.alloc },
                 .index => |index| .{
-                    try index.val.elem(zcu, sema.arena, @intCast(index.elem_index)),
+                    try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
                     index.alloc,
                 },
                 .flat_index => unreachable, // base_ty is not an array
@@ -679,14 +682,14 @@ fn prepareComptimePtrStore(
             const agg_ty = agg_val.typeOf(zcu);
             switch (agg_ty.zigTypeTag(zcu)) {
                 .Struct, .Pointer => break :strat .{ .direct = .{
-                    .val = try agg_val.elem(zcu, sema.arena, @intCast(base_index.index)),
+                    .val = try agg_val.elem(pt, sema.arena, @intCast(base_index.index)),
                     .alloc = alloc,
                 } },
                 .Union => {
                     if (agg_val.* == .interned and Value.fromInterned(agg_val.interned).isUndef(zcu)) {
                         return .undef;
                     }
-                    try agg_val.unintern(zcu, sema.arena, false, false);
+                    try agg_val.unintern(pt, sema.arena, false, false);
                     const un = agg_val.un;
                     const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
                     if (tag_ty.enumTagFieldIndex(Value.fromInterned(un.tag), zcu).? != base_index.index) {
@@ -847,7 +850,7 @@ fn prepareComptimePtrStore(
                 const next_elem_off = elem_size * (elem_idx + 1);
                 if (cur_offset + need_bytes <= next_elem_off) {
                     // We can look at a single array element.
-                    cur_val = try cur_val.elem(zcu, sema.arena, @intCast(elem_idx));
+                    cur_val = try cur_val.elem(pt, sema.arena, @intCast(elem_idx));
                     cur_offset -= elem_idx * elem_size;
                 } else {
                     break;
@@ -857,10 +860,10 @@ fn prepareComptimePtrStore(
                 .auto => unreachable, // ill-defined layout
                 .@"packed" => break, // let the bitcast logic handle this
                 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-                    const start_off = cur_ty.structFieldOffset(field_idx, zcu);
+                    const start_off = cur_ty.structFieldOffset(field_idx, pt);
                     const end_off = start_off + try sema.typeAbiSize(cur_ty.structFieldType(field_idx, zcu));
                     if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
-                        cur_val = try cur_val.elem(zcu, sema.arena, field_idx);
+                        cur_val = try cur_val.elem(pt, sema.arena, field_idx);
                         cur_offset -= start_off;
                         break;
                     }
@@ -874,7 +877,7 @@ fn prepareComptimePtrStore(
             // Otherwise, we might traverse into a union field which doesn't allow pointers.
             // Figure out a solution!
             if (true) break;
-            try cur_val.unintern(zcu, sema.arena, false, false);
+            try cur_val.unintern(pt, sema.arena, false, false);
             const payload = switch (cur_val.*) {
                 .un => |un| un.payload,
                 else => unreachable,
             };
@@ -918,7 +921,7 @@ fn flattenArray(
 ) Allocator.Error!void {
     if (next_idx.* == out.len) return;
 
-    const zcu = sema.mod;
+    const zcu = sema.pt.zcu;
     const ty = val.typeOf(zcu);
 
     const base_elem_count = ty.arrayBase(zcu)[1];
@@ -928,7 +931,7 @@ fn flattenArray(
     }
 
     if (ty.zigTypeTag(zcu) != .Array) {
-        out[@intCast(next_idx.*)] = (try val.intern(zcu, sema.arena)).toIntern();
+        out[@intCast(next_idx.*)] = (try val.intern(sema.pt, sema.arena)).toIntern();
         next_idx.* += 1;
         return;
     }
@@ -942,7 +945,7 @@ fn flattenArray(
            skip.* -= arr_base_elem_count;
             continue;
         }
-        try flattenArray(sema, try val.getElem(zcu, elem_idx), skip, next_idx, out);
+        try flattenArray(sema, try val.getElem(sema.pt, elem_idx), skip, next_idx, out);
     }
     if (ty.sentinel(zcu)) |s| {
         try flattenArray(sema, .{ .interned = s.toIntern() }, skip, next_idx, out);
@@ -957,13 +960,13 @@ fn unflattenArray(
     elems: []const InternPool.Index,
     next_idx: *u64,
 ) Allocator.Error!Value {
-    const zcu = sema.mod;
+    const zcu = sema.pt.zcu;
     const arena = sema.arena;
 
     if (ty.zigTypeTag(zcu) != .Array) {
         const val = Value.fromInterned(elems[@intCast(next_idx.*)]);
         next_idx.* += 1;
-        return zcu.getCoerced(val, ty);
+        return sema.pt.getCoerced(val, ty);
     }
 
     const elem_ty = ty.childType(zcu);
@@ -975,7 +978,7 @@ fn unflattenArray(
         // TODO: validate sentinel
         _ = try unflattenArray(sema, elem_ty, elems, next_idx);
     }
-    return Value.fromInterned(try zcu.intern(.{ .aggregate = .{
+    return Value.fromInterned(try sema.pt.intern(.{ .aggregate = .{
         .ty = ty.toIntern(),
         .storage = .{ .elems = buf },
     } }));
@@ -990,25 +993,25 @@ fn recursiveIndex(
     mv: *MutableValue,
     index: *u64,
 ) !?struct { *MutableValue, u64 } {
-    const zcu = sema.mod;
+    const pt = sema.pt;
 
-    const ty = mv.typeOf(zcu);
-    assert(ty.zigTypeTag(zcu) == .Array);
+    const ty = mv.typeOf(pt.zcu);
+    assert(ty.zigTypeTag(pt.zcu) == .Array);
 
-    const ty_base_elems = ty.arrayBase(zcu)[1];
+    const ty_base_elems = ty.arrayBase(pt.zcu)[1];
     if (index.* >= ty_base_elems) {
         index.* -= ty_base_elems;
         return null;
     }
 
-    const elem_ty = ty.childType(zcu);
-    if (elem_ty.zigTypeTag(zcu) != .Array) {
-        assert(index.* < ty.arrayLenIncludingSentinel(zcu)); // should be handled by initial check
+    const elem_ty = ty.childType(pt.zcu);
+    if (elem_ty.zigTypeTag(pt.zcu) != .Array) {
+        assert(index.* < ty.arrayLenIncludingSentinel(pt.zcu)); // should be handled by initial check
         return .{ mv, index.* };
     }
 
-    for (0..@intCast(ty.arrayLenIncludingSentinel(zcu))) |elem_index| {
-        if (try recursiveIndex(sema, try mv.elem(zcu, sema.arena, elem_index), index)) |result| {
+    for (0..@intCast(ty.arrayLenIncludingSentinel(pt.zcu))) |elem_index| {
+        if (try recursiveIndex(sema, try mv.elem(pt, sema.arena, elem_index), index)) |result| {
             return result;
         }
    }
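The shape of the migration is consistent throughout the diff: read-only type inspection (`typeOf`, `childType`, `zigTypeTag`, `sentinel`) keeps taking `zcu`, while anything that can intern new values or resolve layout (`bitSize`, `abiSize`, `structFieldOffset`, `intern`, `intType`, `undefValue`, `getCoerced`) and the `MutableValue` mutators (`elem`, `setElem`, `unintern`) now take `pt`. Presumably the handle is passed by value because it is small (a pointer plus a thread id), which keeps this pervasive plumbing cheap.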
