Diffstat (limited to 'src/Type.zig')
-rw-r--r--  src/Type.zig  739
1 file changed, 377 insertions, 362 deletions
diff --git a/src/Type.zig b/src/Type.zig
index 49f1276070..57ac2310d5 100644
--- a/src/Type.zig
+++ b/src/Type.zig
@@ -136,16 +136,16 @@ pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt
pub const Formatter = std.fmt.Formatter(format2);
-pub fn fmt(ty: Type, module: *Module) Formatter {
+pub fn fmt(ty: Type, pt: Zcu.PerThread) Formatter {
return .{ .data = .{
.ty = ty,
- .module = module,
+ .pt = pt,
} };
}
const FormatContext = struct {
ty: Type,
- module: *Module,
+ pt: Zcu.PerThread,
};
fn format2(
@@ -156,7 +156,7 @@ fn format2(
) !void {
comptime assert(unused_format_string.len == 0);
_ = options;
- return print(ctx.ty, writer, ctx.module);
+ return print(ctx.ty, writer, ctx.pt);
}
pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) {
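Note: this change threads a `Zcu.PerThread` handle (the `*Zcu` plus per-thread state) everywhere a bare `*Module` used to go. A minimal call-site sketch, assuming a `pt: Zcu.PerThread` is already in scope; the `logType` helper is hypothetical:

    fn logType(ty: Type, pt: Zcu.PerThread) void {
        // `fmt` stores `pt` in the Formatter's data; `format2` forwards it to `print`.
        std.debug.print("type: {}\n", .{ty.fmt(pt)});
    }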
@@ -178,7 +178,8 @@ pub fn dump(
/// Prints a name suitable for `@typeName`.
/// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels.
-pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
+pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error!void {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
@@ -193,8 +194,8 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
if (info.sentinel != .none) switch (info.flags.size) {
.One, .C => unreachable,
- .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}),
- .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}),
+ .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt, null)}),
+ .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt, null)}),
} else switch (info.flags.size) {
.One => try writer.writeAll("*"),
.Many => try writer.writeAll("[*]"),
@@ -208,7 +209,7 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
const alignment = if (info.flags.alignment != .none)
info.flags.alignment
else
- Type.fromInterned(info.child).abiAlignment(mod);
+ Type.fromInterned(info.child).abiAlignment(pt);
try writer.print("align({d}", .{alignment.toByteUnits() orelse 0});
if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
@@ -230,39 +231,39 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
if (info.flags.is_volatile) try writer.writeAll("volatile ");
if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero ");
- try print(Type.fromInterned(info.child), writer, mod);
+ try print(Type.fromInterned(info.child), writer, pt);
return;
},
.array_type => |array_type| {
if (array_type.sentinel == .none) {
try writer.print("[{d}]", .{array_type.len});
- try print(Type.fromInterned(array_type.child), writer, mod);
+ try print(Type.fromInterned(array_type.child), writer, pt);
} else {
try writer.print("[{d}:{}]", .{
array_type.len,
- Value.fromInterned(array_type.sentinel).fmtValue(mod, null),
+ Value.fromInterned(array_type.sentinel).fmtValue(pt, null),
});
- try print(Type.fromInterned(array_type.child), writer, mod);
+ try print(Type.fromInterned(array_type.child), writer, pt);
}
return;
},
.vector_type => |vector_type| {
try writer.print("@Vector({d}, ", .{vector_type.len});
- try print(Type.fromInterned(vector_type.child), writer, mod);
+ try print(Type.fromInterned(vector_type.child), writer, pt);
try writer.writeAll(")");
return;
},
.opt_type => |child| {
try writer.writeByte('?');
- return print(Type.fromInterned(child), writer, mod);
+ return print(Type.fromInterned(child), writer, pt);
},
.error_union_type => |error_union_type| {
- try print(Type.fromInterned(error_union_type.error_set_type), writer, mod);
+ try print(Type.fromInterned(error_union_type.error_set_type), writer, pt);
try writer.writeByte('!');
if (error_union_type.payload_type == .generic_poison_type) {
try writer.writeAll("anytype");
} else {
- try print(Type.fromInterned(error_union_type.payload_type), writer, mod);
+ try print(Type.fromInterned(error_union_type.payload_type), writer, pt);
}
return;
},
@@ -355,10 +356,10 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)});
}
- try print(Type.fromInterned(field_ty), writer, mod);
+ try print(Type.fromInterned(field_ty), writer, pt);
if (val != .none) {
- try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)});
+ try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt, null)});
}
}
try writer.writeAll("}");
@@ -395,7 +396,7 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
if (param_ty == .generic_poison_type) {
try writer.writeAll("anytype");
} else {
- try print(Type.fromInterned(param_ty), writer, mod);
+ try print(Type.fromInterned(param_ty), writer, pt);
}
}
if (fn_info.is_var_args) {
@@ -413,13 +414,13 @@ pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void
if (fn_info.return_type == .generic_poison_type) {
try writer.writeAll("anytype");
} else {
- try print(Type.fromInterned(fn_info.return_type), writer, mod);
+ try print(Type.fromInterned(fn_info.return_type), writer, pt);
}
},
.anyframe_type => |child| {
if (child == .none) return writer.writeAll("anyframe");
try writer.writeAll("anyframe->");
- return print(Type.fromInterned(child), writer, mod);
+ return print(Type.fromInterned(child), writer, pt);
},
// values, not types
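Note: for orientation, a sketch of the strings this printer produces for a few representative types, derived from the branches above (illustrative, not output captured from the compiler):

    // [*:0]align(4) const u8   many-pointer: sentinel, explicit align, const
    // [3:0]u8                  array with a sentinel
    // @Vector(4, f32)          vector
    // anyframe->u32            anyframe with a child type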
@@ -475,10 +476,11 @@ const RuntimeBitsError = SemaError || error{NeedLazy};
/// may return false positives.
pub fn hasRuntimeBitsAdvanced(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
ignore_comptime_only: bool,
strat: ResolveStratLazy,
) RuntimeBitsError!bool {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
return switch (ty.toIntern()) {
// False because it is a comptime-only type.
@@ -490,16 +492,16 @@ pub fn hasRuntimeBitsAdvanced(
// to comptime-only types do not, with the exception of function pointers.
if (ignore_comptime_only) return true;
return switch (strat) {
- .sema => !try ty.comptimeOnlyAdvanced(mod, .sema),
- .eager => !ty.comptimeOnly(mod),
+ .sema => !try ty.comptimeOnlyAdvanced(pt, .sema),
+ .eager => !ty.comptimeOnly(pt),
.lazy => error.NeedLazy,
};
},
.anyframe_type => true,
.array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and
- try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat),
.vector_type => |vector_type| return vector_type.len > 0 and
- try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat),
.opt_type => |child| {
const child_ty = Type.fromInterned(child);
if (child_ty.isNoReturn(mod)) {
@@ -508,8 +510,8 @@ pub fn hasRuntimeBitsAdvanced(
}
if (ignore_comptime_only) return true;
return switch (strat) {
- .sema => !try child_ty.comptimeOnlyAdvanced(mod, .sema),
- .eager => !child_ty.comptimeOnly(mod),
+ .sema => !try child_ty.comptimeOnlyAdvanced(pt, .sema),
+ .eager => !child_ty.comptimeOnly(pt),
.lazy => error.NeedLazy,
};
},
@@ -580,14 +582,14 @@ pub fn hasRuntimeBitsAdvanced(
return true;
}
switch (strat) {
- .sema => try ty.resolveFields(mod),
+ .sema => try ty.resolveFields(pt),
.eager => assert(struct_type.haveFieldTypes(ip)),
.lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy,
}
for (0..struct_type.field_types.len) |i| {
if (struct_type.comptime_bits.getBit(ip, i)) continue;
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+ if (try field_ty.hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -596,7 +598,7 @@ pub fn hasRuntimeBitsAdvanced(
.anon_struct_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
- if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true;
+ if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat)) return true;
}
return false;
},
@@ -617,21 +619,21 @@ pub fn hasRuntimeBitsAdvanced(
// tag_ty will be `none` if this union's tag type is not resolved yet,
// in which case we want control flow to continue down below.
if (tag_ty != .none and
- try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+ try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat))
{
return true;
}
},
}
switch (strat) {
- .sema => try ty.resolveFields(mod),
+ .sema => try ty.resolveFields(pt),
.eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()),
.lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
return error.NeedLazy,
}
for (0..union_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
- if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+ if (try field_ty.hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -639,7 +641,7 @@ pub fn hasRuntimeBitsAdvanced(
},
.opaque_type => true,
- .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(pt, ignore_comptime_only, strat),
// values, not types
.undef,
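Note: the `strat` parameter picks one of three behaviors: `.eager` asserts all resolution already happened (so `catch unreachable` is safe), `.sema` may resolve fields on demand (and so can fail with a compile error), and `.lazy` refuses to resolve, surfacing `error.NeedLazy` so the caller can intern a deferred value instead. A minimal caller sketch (hypothetical helper):

    fn runtimeBitsOrDefer(ty: Type, pt: Zcu.PerThread) SemaError!bool {
        return ty.hasRuntimeBitsAdvanced(pt, false, .lazy) catch |err| switch (err) {
            // A real caller would intern a lazy_size/lazy_align value here.
            error.NeedLazy => false,
            else => |e| return e,
        };
    }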
@@ -777,41 +779,41 @@ pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
};
}
-pub fn hasRuntimeBits(ty: Type, mod: *Module) bool {
- return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable;
+pub fn hasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
+ return hasRuntimeBitsAdvanced(ty, pt, false, .eager) catch unreachable;
}
-pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
- return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable;
+pub fn hasRuntimeBitsIgnoreComptime(ty: Type, pt: Zcu.PerThread) bool {
+ return hasRuntimeBitsAdvanced(ty, pt, true, .eager) catch unreachable;
}
-pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool {
- return ty.fnHasRuntimeBitsAdvanced(mod, .normal) catch unreachable;
+pub fn fnHasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
+ return ty.fnHasRuntimeBitsAdvanced(pt, .normal) catch unreachable;
}
/// Determines whether a function type has runtime bits, i.e. whether a
/// function with this type can exist at runtime.
/// Asserts that `ty` is a function type.
-pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool {
- const fn_info = mod.typeToFunc(ty).?;
+pub fn fnHasRuntimeBitsAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) SemaError!bool {
+ const fn_info = pt.zcu.typeToFunc(ty).?;
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
if (fn_info.cc == .Inline) return false;
- return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, strat);
+ return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(pt, strat);
}
-pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
- switch (ty.zigTypeTag(mod)) {
- .Fn => return ty.fnHasRuntimeBits(mod),
- else => return ty.hasRuntimeBits(mod),
+pub fn isFnOrHasRuntimeBits(ty: Type, pt: Zcu.PerThread) bool {
+ switch (ty.zigTypeTag(pt.zcu)) {
+ .Fn => return ty.fnHasRuntimeBits(pt),
+ else => return ty.hasRuntimeBits(pt),
}
}
/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
-pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
- return switch (ty.zigTypeTag(mod)) {
+pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, pt: Zcu.PerThread) bool {
+ return switch (ty.zigTypeTag(pt.zcu)) {
.Fn => true,
- else => return ty.hasRuntimeBitsIgnoreComptime(mod),
+ else => return ty.hasRuntimeBitsIgnoreComptime(pt),
};
}
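Note: behavior summary for `fnHasRuntimeBits`, read straight off the branches above (illustrative signatures):

    // fn (comptime T: type) T        -> false (generic)
    // fn (a: i32, ...) callconv(.C) i32 -> true (var args always lower to code)
    // inline fn () void              -> false (.Inline never exists at runtime)
    // fn () type                     -> false (comptime-only return type)
    // fn (u8) u8                     -> true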
@@ -820,24 +822,24 @@ pub fn isNoReturn(ty: Type, mod: *Module) bool {
}
/// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
-pub fn ptrAlignment(ty: Type, mod: *Module) Alignment {
- return ptrAlignmentAdvanced(ty, mod, .normal) catch unreachable;
+pub fn ptrAlignment(ty: Type, pt: Zcu.PerThread) Alignment {
+ return ptrAlignmentAdvanced(ty, pt, .normal) catch unreachable;
}
-pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) !Alignment {
- return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+pub fn ptrAlignmentAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) !Alignment {
+ return switch (pt.zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| {
if (ptr_type.flags.alignment != .none)
return ptr_type.flags.alignment;
if (strat == .sema) {
- const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .sema);
+ const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(pt, .sema);
return res.scalar;
}
- return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
+ return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(pt, .eager) catch unreachable).scalar;
},
- .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, strat),
+ .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(pt, strat),
else => unreachable,
};
}
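Note: resolution order in `ptrAlignmentAdvanced`: an explicit `align(N)` on the pointer wins; otherwise the child's ABI alignment is used; an optional pointer recurses into its child. Illustrative results (target-dependent where noted):

    // *align(16) u8 -> 16 (explicit flag short-circuits)
    // *u32          -> abiAlignment(u32), typically 4
    // ?*u32         -> same as *u32, via the .opt_type recursion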
@@ -851,16 +853,16 @@ pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace {
}
/// Never returns `none`. Asserts that all necessary type resolution is already done.
-pub fn abiAlignment(ty: Type, mod: *Module) Alignment {
- return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
+pub fn abiAlignment(ty: Type, pt: Zcu.PerThread) Alignment {
+ return (ty.abiAlignmentAdvanced(pt, .eager) catch unreachable).scalar;
}
/// May capture a reference to `ty`.
/// Returned value has type `comptime_int`.
-pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value {
- switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
+pub fn lazyAbiAlignment(ty: Type, pt: Zcu.PerThread) !Value {
+ switch (try ty.abiAlignmentAdvanced(pt, .lazy)) {
.val => |val| return val,
- .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0),
+ .scalar => |x| return pt.intValue(Type.comptime_int, x.toByteUnits() orelse 0),
}
}
@@ -907,38 +909,39 @@ pub const ResolveStrat = enum {
/// necessary, possibly returning a CompileError.
pub fn abiAlignmentAdvanced(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
) SemaError!AbiAlignmentAdvanced {
+ const mod = pt.zcu;
const target = mod.getTarget();
const use_llvm = mod.comp.config.use_llvm;
const ip = &mod.intern_pool;
switch (ty.toIntern()) {
- .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" },
+ .empty_struct_type => return .{ .scalar = .@"1" },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
- if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" };
+ if (int_type.bits == 0) return .{ .scalar = .@"1" };
return .{ .scalar = intAbiAlignment(int_type.bits, target, use_llvm) };
},
.ptr_type, .anyframe_type => {
return .{ .scalar = ptrAbiAlignment(target) };
},
.array_type => |array_type| {
- return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat);
+ return Type.fromInterned(array_type.child).abiAlignmentAdvanced(pt, strat);
},
.vector_type => |vector_type| {
if (vector_type.len == 0) return .{ .scalar = .@"1" };
switch (mod.comp.getZigBackend()) {
else => {
- const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, .sema));
+ const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(pt, .sema));
if (elem_bits == 0) return .{ .scalar = .@"1" };
const bytes = ((elem_bits * vector_type.len) + 7) / 8;
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
return .{ .scalar = Alignment.fromByteUnits(alignment) };
},
.stage2_c => {
- return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat);
+ return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(pt, strat);
},
.stage2_x86_64 => {
if (vector_type.child == .bool_type) {
@@ -949,7 +952,7 @@ pub fn abiAlignmentAdvanced(
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
return .{ .scalar = Alignment.fromByteUnits(alignment) };
}
- const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
+ const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar);
if (elem_bytes == 0) return .{ .scalar = .@"1" };
const bytes = elem_bytes * vector_type.len;
if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" };
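Note: a worked example of the generic vector rule above, assuming the default backend: for `@Vector(3, u7)`, `elem_bits = 7`, `bytes = (7 * 3 + 7) / 8 = 3`, and `ceilPowerOfTwoAssert(u32, 3) = 4`, so the alignment is 4 bytes. On the x86_64 self-hosted backend the byte count feeds the branch above instead, where anything over 32 bytes with AVX-512 gets 64-byte alignment.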
@@ -959,12 +962,12 @@ pub fn abiAlignmentAdvanced(
}
},
- .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat),
- .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)),
+ .opt_type => return ty.abiAlignmentAdvancedOptional(pt, strat),
+ .error_union_type => |info| return ty.abiAlignmentAdvancedErrorUnion(pt, strat, Type.fromInterned(info.payload_type)),
.error_set_type, .inferred_error_set_type => {
const bits = mod.errorSetBits();
- if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" };
+ if (bits == 0) return .{ .scalar = .@"1" };
return .{ .scalar = intAbiAlignment(bits, target, use_llvm) };
},
@@ -1012,10 +1015,7 @@ pub fn abiAlignmentAdvanced(
},
.f80 => switch (target.c_type_bit_size(.longdouble)) {
80 => return .{ .scalar = cTypeAlign(target, .longdouble) },
- else => {
- const u80_ty: Type = .{ .ip_index = .u80_type };
- return .{ .scalar = abiAlignment(u80_ty, mod) };
- },
+ else => return .{ .scalar = Type.u80.abiAlignment(pt) },
},
.f128 => switch (target.c_type_bit_size(.longdouble)) {
128 => return .{ .scalar = cTypeAlign(target, .longdouble) },
@@ -1024,7 +1024,7 @@ pub fn abiAlignmentAdvanced(
.anyerror, .adhoc_inferred_error_set => {
const bits = mod.errorSetBits();
- if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" };
+ if (bits == 0) return .{ .scalar = .@"1" };
return .{ .scalar = intAbiAlignment(bits, target, use_llvm) };
},
@@ -1044,22 +1044,22 @@ pub fn abiAlignmentAdvanced(
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .@"packed") {
switch (strat) {
- .sema => try ty.resolveLayout(mod),
+ .sema => try ty.resolveLayout(pt),
.lazy => if (struct_type.backingIntType(ip).* == .none) return .{
- .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))),
+ } })),
},
.eager => {},
}
- return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) };
+ return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(pt) };
}
if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) {
.eager => unreachable, // struct alignment not resolved
- .sema => try ty.resolveStructAlignment(mod),
- .lazy => return .{ .val = Value.fromInterned(try mod.intern(.{ .int = .{
+ .sema => try ty.resolveStructAlignment(pt),
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
} })) },
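Note: the `.lazy` arms above all intern the same shape of value: a `comptime_int` whose storage records that its numeric value is the not-yet-computed alignment (or size) of `ty`. Extracted as a sketch for clarity (hypothetical helper, same pattern as the code above):

    fn lazyAlignValue(ty: Type, pt: Zcu.PerThread) !Value {
        return Value.fromInterned(try pt.intern(.{ .int = .{
            .ty = .comptime_int_type,
            .storage = .{ .lazy_align = ty.toIntern() },
        } }));
    }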
@@ -1071,15 +1071,15 @@ pub fn abiAlignmentAdvanced(
var big_align: Alignment = .@"1";
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
- switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) {
+ switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(pt, strat)) {
.scalar => |field_align| big_align = big_align.max(field_align),
.val => switch (strat) {
.eager => unreachable, // field type alignment not resolved
.sema => unreachable, // passed to abiAlignmentAdvanced above
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
},
}
}
@@ -1090,18 +1090,18 @@ pub fn abiAlignmentAdvanced(
if (union_type.flagsPtr(ip).alignment == .none) switch (strat) {
.eager => unreachable, // union layout not resolved
- .sema => try ty.resolveUnionAlignment(mod),
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .sema => try ty.resolveUnionAlignment(pt),
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
};
return .{ .scalar = union_type.flagsPtr(ip).alignment };
},
.opaque_type => return .{ .scalar = .@"1" },
.enum_type => return .{
- .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod),
+ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(pt),
},
// values, not types
@@ -1131,91 +1131,92 @@ pub fn abiAlignmentAdvanced(
fn abiAlignmentAdvancedErrorUnion(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
payload_ty: Type,
) SemaError!AbiAlignmentAdvanced {
// This code needs to be kept in sync with the equivalent switch prong
// in abiSizeAdvanced.
- const code_align = abiAlignment(Type.anyerror, mod);
+ const code_align = Type.anyerror.abiAlignment(pt);
switch (strat) {
.eager, .sema => {
- if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ if (!(payload_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
+ error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
else => |e| return e,
})) {
return .{ .scalar = code_align };
}
return .{ .scalar = code_align.max(
- (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar,
+ (try payload_ty.abiAlignmentAdvanced(pt, strat)).scalar,
) };
},
.lazy => {
- switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) {
+ switch (try payload_ty.abiAlignmentAdvanced(pt, strat)) {
.scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) },
.val => {},
}
- return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) };
+ } })) };
},
}
}
fn abiAlignmentAdvancedOptional(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
) SemaError!AbiAlignmentAdvanced {
+ const mod = pt.zcu;
const target = mod.getTarget();
const child_type = ty.optionalChild(mod);
switch (child_type.zigTypeTag(mod)) {
.Pointer => return .{ .scalar = ptrAbiAlignment(target) },
- .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
+ .ErrorSet => return Type.anyerror.abiAlignmentAdvanced(pt, strat),
.NoReturn => return .{ .scalar = .@"1" },
else => {},
}
switch (strat) {
.eager, .sema => {
- if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ if (!(child_type.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
+ error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
else => |e| return e,
})) {
return .{ .scalar = .@"1" };
}
- return child_type.abiAlignmentAdvanced(mod, strat);
+ return child_type.abiAlignmentAdvanced(pt, strat);
},
- .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
+ .lazy => switch (try child_type.abiAlignmentAdvanced(pt, strat)) {
.scalar => |x| return .{ .scalar = x.max(.@"1") },
- .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } }))) },
+ } })) },
},
}
}
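Note: summary of the optional-alignment cases above (illustrative):

    // ?*T       -> pointer ABI alignment (null uses the zero address)
    // ?anyerror -> alignment of anyerror
    // ?void     -> 1 (child has no runtime bits; only the flag remains)
    // ?u64      -> alignment of u64 (the flag never raises alignment)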
/// May capture a reference to `ty`.
-pub fn lazyAbiSize(ty: Type, mod: *Module) !Value {
- switch (try ty.abiSizeAdvanced(mod, .lazy)) {
+pub fn lazyAbiSize(ty: Type, pt: Zcu.PerThread) !Value {
+ switch (try ty.abiSizeAdvanced(pt, .lazy)) {
.val => |val| return val,
- .scalar => |x| return mod.intValue(Type.comptime_int, x),
+ .scalar => |x| return pt.intValue(Type.comptime_int, x),
}
}
/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasRuntimeBits() return 0.
-pub fn abiSize(ty: Type, mod: *Module) u64 {
- return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar;
+pub fn abiSize(ty: Type, pt: Zcu.PerThread) u64 {
+ return (abiSizeAdvanced(ty, pt, .eager) catch unreachable).scalar;
}
const AbiSizeAdvanced = union(enum) {
@@ -1231,38 +1232,39 @@ const AbiSizeAdvanced = union(enum) {
/// necessary, possibly returning a CompileError.
pub fn abiSizeAdvanced(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
) SemaError!AbiSizeAdvanced {
+ const mod = pt.zcu;
const target = mod.getTarget();
const use_llvm = mod.comp.config.use_llvm;
const ip = &mod.intern_pool;
switch (ty.toIntern()) {
- .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 },
+ .empty_struct_type => return .{ .scalar = 0 },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
- if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
- return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target, use_llvm) };
+ if (int_type.bits == 0) return .{ .scalar = 0 };
+ return .{ .scalar = intAbiSize(int_type.bits, target, use_llvm) };
},
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },
else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
},
- .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
+ .anyframe_type => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
.array_type => |array_type| {
const len = array_type.lenIncludingSentinel();
if (len == 0) return .{ .scalar = 0 };
- switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) {
+ switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(pt, strat)) {
.scalar => |elem_size| return .{ .scalar = len * elem_size },
.val => switch (strat) {
.sema, .eager => unreachable,
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
},
}
},
@@ -1270,71 +1272,71 @@ pub fn abiSizeAdvanced(
const sub_strat: ResolveStrat = switch (strat) {
.sema => .sema,
.eager => .normal,
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
};
- const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
+ const alignment = switch (try ty.abiAlignmentAdvanced(pt, strat)) {
.scalar => |x| x,
- .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
};
const total_bytes = switch (mod.comp.getZigBackend()) {
else => total_bytes: {
- const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, sub_strat);
+ const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(pt, sub_strat);
const total_bits = elem_bits * vector_type.len;
break :total_bytes (total_bits + 7) / 8;
},
.stage2_c => total_bytes: {
- const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
+ const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar);
break :total_bytes elem_bytes * vector_type.len;
},
.stage2_x86_64 => total_bytes: {
if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable;
- const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
+ const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(pt, strat)).scalar);
break :total_bytes elem_bytes * vector_type.len;
},
};
- return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) };
+ return .{ .scalar = alignment.forward(total_bytes) };
},
- .opt_type => return ty.abiSizeAdvancedOptional(mod, strat),
+ .opt_type => return ty.abiSizeAdvancedOptional(pt, strat),
.error_set_type, .inferred_error_set_type => {
const bits = mod.errorSetBits();
- if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
- return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) };
+ if (bits == 0) return .{ .scalar = 0 };
+ return .{ .scalar = intAbiSize(bits, target, use_llvm) };
},
.error_union_type => |error_union_type| {
const payload_ty = Type.fromInterned(error_union_type.payload_type);
// This code needs to be kept in sync with the equivalent switch prong
// in abiAlignmentAdvanced.
- const code_size = abiSize(Type.anyerror, mod);
- if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ const code_size = Type.anyerror.abiSize(pt);
+ if (!(payload_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
+ error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
else => |e| return e,
})) {
// Same as anyerror.
- return AbiSizeAdvanced{ .scalar = code_size };
+ return .{ .scalar = code_size };
}
- const code_align = abiAlignment(Type.anyerror, mod);
- const payload_align = abiAlignment(payload_ty, mod);
- const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) {
+ const code_align = Type.anyerror.abiAlignment(pt);
+ const payload_align = payload_ty.abiAlignment(pt);
+ const payload_size = switch (try payload_ty.abiSizeAdvanced(pt, strat)) {
.scalar => |elem_size| elem_size,
.val => switch (strat) {
.sema => unreachable,
.eager => unreachable,
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
},
};
@@ -1350,7 +1352,7 @@ pub fn abiSizeAdvanced(
size += code_size;
size = payload_align.forward(size);
}
- return AbiSizeAdvanced{ .scalar = size };
+ return .{ .scalar = size };
},
.func_type => unreachable, // represents machine code; not a pointer
.simple_type => |t| switch (t) {
@@ -1362,34 +1364,31 @@ pub fn abiSizeAdvanced(
.float_mode,
.reduce_op,
.call_modifier,
- => return AbiSizeAdvanced{ .scalar = 1 },
+ => return .{ .scalar = 1 },
- .f16 => return AbiSizeAdvanced{ .scalar = 2 },
- .f32 => return AbiSizeAdvanced{ .scalar = 4 },
- .f64 => return AbiSizeAdvanced{ .scalar = 8 },
- .f128 => return AbiSizeAdvanced{ .scalar = 16 },
+ .f16 => return .{ .scalar = 2 },
+ .f32 => return .{ .scalar = 4 },
+ .f64 => return .{ .scalar = 8 },
+ .f128 => return .{ .scalar = 16 },
.f80 => switch (target.c_type_bit_size(.longdouble)) {
- 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
- else => {
- const u80_ty: Type = .{ .ip_index = .u80_type };
- return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) };
- },
+ 80 => return .{ .scalar = target.c_type_byte_size(.longdouble) },
+ else => return .{ .scalar = Type.u80.abiSize(pt) },
},
.usize,
.isize,
- => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
-
- .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) },
- .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) },
- .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) },
- .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) },
- .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) },
- .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) },
- .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) },
- .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) },
- .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) },
- .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
+ => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
+
+ .c_char => return .{ .scalar = target.c_type_byte_size(.char) },
+ .c_short => return .{ .scalar = target.c_type_byte_size(.short) },
+ .c_ushort => return .{ .scalar = target.c_type_byte_size(.ushort) },
+ .c_int => return .{ .scalar = target.c_type_byte_size(.int) },
+ .c_uint => return .{ .scalar = target.c_type_byte_size(.uint) },
+ .c_long => return .{ .scalar = target.c_type_byte_size(.long) },
+ .c_ulong => return .{ .scalar = target.c_type_byte_size(.ulong) },
+ .c_longlong => return .{ .scalar = target.c_type_byte_size(.longlong) },
+ .c_ulonglong => return .{ .scalar = target.c_type_byte_size(.ulonglong) },
+ .c_longdouble => return .{ .scalar = target.c_type_byte_size(.longdouble) },
.anyopaque,
.void,
@@ -1399,12 +1398,12 @@ pub fn abiSizeAdvanced(
.null,
.undefined,
.enum_literal,
- => return AbiSizeAdvanced{ .scalar = 0 },
+ => return .{ .scalar = 0 },
.anyerror, .adhoc_inferred_error_set => {
const bits = mod.errorSetBits();
- if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
- return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) };
+ if (bits == 0) return .{ .scalar = 0 };
+ return .{ .scalar = intAbiSize(bits, target, use_llvm) };
},
.prefetch_options => unreachable, // missing call to resolveTypeFields
@@ -1418,22 +1417,22 @@ pub fn abiSizeAdvanced(
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
switch (strat) {
- .sema => try ty.resolveLayout(mod),
+ .sema => try ty.resolveLayout(pt),
.lazy => switch (struct_type.layout) {
.@"packed" => {
if (struct_type.backingIntType(ip).* == .none) return .{
- .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))),
+ } })),
};
},
.auto, .@"extern" => {
if (!struct_type.haveLayout(ip)) return .{
- .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))),
+ } })),
};
},
},
@@ -1441,7 +1440,7 @@ pub fn abiSizeAdvanced(
}
switch (struct_type.layout) {
.@"packed" => return .{
- .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod),
+ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(pt),
},
.auto, .@"extern" => {
assert(struct_type.haveLayout(ip));
@@ -1451,25 +1450,25 @@ pub fn abiSizeAdvanced(
},
.anon_struct_type => |tuple| {
switch (strat) {
- .sema => try ty.resolveLayout(mod),
+ .sema => try ty.resolveLayout(pt),
.lazy, .eager => {},
}
const field_count = tuple.types.len;
if (field_count == 0) {
- return AbiSizeAdvanced{ .scalar = 0 };
+ return .{ .scalar = 0 };
}
- return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
+ return .{ .scalar = ty.structFieldOffset(field_count, pt) };
},
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (strat) {
- .sema => try ty.resolveLayout(mod),
+ .sema => try ty.resolveLayout(pt),
.lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
- .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))),
+ } })),
},
.eager => {},
}
@@ -1478,7 +1477,7 @@ pub fn abiSizeAdvanced(
return .{ .scalar = union_type.size(ip).* };
},
.opaque_type => unreachable, // no size available
- .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) },
+ .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(pt) },
// values, not types
.undef,
@@ -1507,36 +1506,37 @@ pub fn abiSizeAdvanced(
fn abiSizeAdvancedOptional(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStratLazy,
) SemaError!AbiSizeAdvanced {
+ const mod = pt.zcu;
const child_ty = ty.optionalChild(mod);
if (child_ty.isNoReturn(mod)) {
- return AbiSizeAdvanced{ .scalar = 0 };
+ return .{ .scalar = 0 };
}
- if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ if (!(child_ty.hasRuntimeBitsAdvanced(pt, false, strat) catch |err| switch (err) {
+ error.NeedLazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
else => |e| return e,
- })) return AbiSizeAdvanced{ .scalar = 1 };
+ })) return .{ .scalar = 1 };
if (ty.optionalReprIsPayload(mod)) {
- return abiSizeAdvanced(child_ty, mod, strat);
+ return child_ty.abiSizeAdvanced(pt, strat);
}
- const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) {
+ const payload_size = switch (try child_ty.abiSizeAdvanced(pt, strat)) {
.scalar => |elem_size| elem_size,
.val => switch (strat) {
.sema => unreachable,
.eager => unreachable,
- .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } }))) },
+ } })) },
},
};
@@ -1544,8 +1544,8 @@ fn abiSizeAdvancedOptional(
// field and a boolean as the second. Since the child type's abi alignment is
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
// to the child type's ABI alignment.
- return AbiSizeAdvanced{
- .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size,
+ return .{
+ .scalar = (child_ty.abiAlignment(pt).toByteUnits() orelse 0) + payload_size,
};
}
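Note: a worked example of the size formula above: for `?u32` the payload is 4 bytes and `abiAlignment(u32)` is typically 4, so the optional is 8 bytes: the payload, then a 1-byte flag padded out to the payload's alignment. `?*T` takes the `optionalReprIsPayload` path instead and is pointer-sized, and `?noreturn` is 0 bytes.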
@@ -1675,15 +1675,16 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 {
};
}
-pub fn bitSize(ty: Type, mod: *Module) u64 {
- return bitSizeAdvanced(ty, mod, .normal) catch unreachable;
+pub fn bitSize(ty: Type, pt: Zcu.PerThread) u64 {
+ return bitSizeAdvanced(ty, pt, .normal) catch unreachable;
}
pub fn bitSizeAdvanced(
ty: Type,
- mod: *Module,
+ pt: Zcu.PerThread,
strat: ResolveStrat,
) SemaError!u64 {
+ const mod = pt.zcu;
const target = mod.getTarget();
const ip = &mod.intern_pool;
@@ -1702,22 +1703,22 @@ pub fn bitSizeAdvanced(
if (len == 0) return 0;
const elem_ty = Type.fromInterned(array_type.child);
const elem_size = @max(
- (try elem_ty.abiAlignmentAdvanced(mod, strat_lazy)).scalar.toByteUnits() orelse 0,
- (try elem_ty.abiSizeAdvanced(mod, strat_lazy)).scalar,
+ (try elem_ty.abiAlignmentAdvanced(pt, strat_lazy)).scalar.toByteUnits() orelse 0,
+ (try elem_ty.abiSizeAdvanced(pt, strat_lazy)).scalar,
);
if (elem_size == 0) return 0;
- const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, strat);
+ const elem_bit_size = try elem_ty.bitSizeAdvanced(pt, strat);
return (len - 1) * 8 * elem_size + elem_bit_size;
},
.vector_type => |vector_type| {
const child_ty = Type.fromInterned(vector_type.child);
- const elem_bit_size = try bitSizeAdvanced(child_ty, mod, strat);
+ const elem_bit_size = try child_ty.bitSizeAdvanced(pt, strat);
return elem_bit_size * vector_type.len;
},
.opt_type => {
// Optionals and error unions are not packed so their bitsize
// includes padding bits.
- return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8;
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
},
.error_set_type, .inferred_error_set_type => return mod.errorSetBits(),
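Note: a worked example of the array rule above: for `[4]u7`, `elem_size = @max(1, 1) = 1` byte, so `bitSize = (4 - 1) * 8 * 1 + 7 = 31` bits; the padding bits of the last element are deliberately not counted.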
@@ -1725,7 +1726,7 @@ pub fn bitSizeAdvanced(
.error_union_type => {
// Optionals and error unions are not packed so their bitsize
// includes padding bits.
- return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8;
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
},
.func_type => unreachable, // represents machine code; not a pointer
.simple_type => |t| switch (t) {
@@ -1783,42 +1784,42 @@ pub fn bitSizeAdvanced(
const struct_type = ip.loadStructType(ty.toIntern());
const is_packed = struct_type.layout == .@"packed";
if (strat == .sema) {
- try ty.resolveFields(mod);
- if (is_packed) try ty.resolveLayout(mod);
+ try ty.resolveFields(pt);
+ if (is_packed) try ty.resolveLayout(pt);
}
if (is_packed) {
- return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, strat);
+ return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(pt, strat);
}
- return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
},
.anon_struct_type => {
- if (strat == .sema) try ty.resolveFields(mod);
- return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
+ if (strat == .sema) try ty.resolveFields(pt);
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
},
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
const is_packed = ty.containerLayout(mod) == .@"packed";
if (strat == .sema) {
- try ty.resolveFields(mod);
- if (is_packed) try ty.resolveLayout(mod);
+ try ty.resolveFields(pt);
+ if (is_packed) try ty.resolveLayout(pt);
}
if (!is_packed) {
- return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8;
+ return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8;
}
assert(union_type.flagsPtr(ip).status.haveFieldTypes());
var size: u64 = 0;
for (0..union_type.field_types.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index];
- size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, strat));
+ size = @max(size, try Type.fromInterned(field_ty).bitSizeAdvanced(pt, strat));
}
return size;
},
.opaque_type => unreachable,
- .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, strat),
+ .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).bitSizeAdvanced(pt, strat),
// values, not types
.undef,
@@ -1870,7 +1871,7 @@ pub fn isSinglePointer(ty: Type, mod: *const Module) bool {
/// Asserts `ty` is a pointer.
pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size {
- return ptrSizeOrNull(ty, mod).?;
+ return ty.ptrSizeOrNull(mod).?;
}
/// Returns `null` if `ty` is not a pointer.
@@ -2105,29 +2106,28 @@ pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
return mod.unionTagFieldIndex(union_obj, enum_tag);
}
-pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool {
- const ip = &mod.intern_pool;
- const union_obj = mod.typeToUnion(ty).?;
+pub fn unionHasAllZeroBitFieldTypes(ty: Type, pt: Zcu.PerThread) bool {
+ const ip = &pt.zcu.intern_pool;
+ const union_obj = pt.zcu.typeToUnion(ty).?;
for (union_obj.field_types.get(ip)) |field_ty| {
- if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false;
+ if (Type.fromInterned(field_ty).hasRuntimeBits(pt)) return false;
}
return true;
}
/// Returns the type used for backing storage of this union during comptime operations.
/// Asserts the type is either an extern or packed union.
-pub fn unionBackingType(ty: Type, mod: *Module) !Type {
- return switch (ty.containerLayout(mod)) {
- .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }),
- .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))),
+pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type {
+ return switch (ty.containerLayout(pt.zcu)) {
+ .@"extern" => try pt.arrayType(.{ .len = ty.abiSize(pt), .child = .u8_type }),
+ .@"packed" => try pt.intType(.unsigned, @intCast(ty.bitSize(pt))),
.auto => unreachable,
};
}
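Note: illustrative results for `unionBackingType`, from the two branches above: an extern union with `abiSize == 8` is backed by `[8]u8`, and a packed union with `bitSize == 33` is backed by `u33`. The `.auto` layout has no defined backing type, hence the `unreachable`.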
-pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout {
- const ip = &mod.intern_pool;
- const union_obj = ip.loadUnionType(ty.toIntern());
- return mod.getUnionLayout(union_obj);
+pub fn unionGetLayout(ty: Type, pt: Zcu.PerThread) Module.UnionLayout {
+ const union_obj = pt.zcu.intern_pool.loadUnionType(ty.toIntern());
+ return pt.getUnionLayout(union_obj);
}
pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
@@ -2509,7 +2509,8 @@ pub fn isNumeric(ty: Type, mod: *const Module) bool {
/// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which
/// resolves field types rather than asserting they are already resolved.
-pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
+pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
+ const mod = pt.zcu;
var ty = starting_type;
const ip = &mod.intern_pool;
while (true) switch (ty.toIntern()) {
@@ -2518,7 +2519,7 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
if (int_type.bits == 0) {
- return try mod.intValue(ty, 0);
+ return try pt.intValue(ty, 0);
} else {
return null;
}
@@ -2534,21 +2535,21 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
inline .array_type, .vector_type => |seq_type, seq_tag| {
const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
- if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
- } })));
- if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| {
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ } }));
+ if (try Type.fromInterned(seq_type.child).onePossibleValue(pt)) |opv| {
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = opv.toIntern() },
- } })));
+ } }));
}
return null;
},
.opt_type => |child| {
if (child == .noreturn_type) {
- return try mod.nullValue(ty);
+ return try pt.nullValue(ty);
} else {
return null;
}
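Note: a few one-possible-value cases, read off the prongs above (illustrative):

    // u0        -> 0 (zero-bit integer)
    // [0]u8     -> the empty aggregate (.elems = &.{})
    // ?noreturn -> null
    // [2]u0     -> aggregate with .repeated_elem = the OPV of u0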
@@ -2615,17 +2616,17 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
continue;
}
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- if (try field_ty.onePossibleValue(mod)) |field_opv| {
+ if (try field_ty.onePossibleValue(pt)) |field_opv| {
field_val.* = field_opv.toIntern();
} else return null;
}
// In this case the struct has no runtime-known fields and
// therefore has one possible value.
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
- } })));
+ } }));
},
.anon_struct_type => |tuple| {
@@ -2637,24 +2638,24 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
// TODO: write something like getCoercedInts to avoid needing to dupe
const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip));
defer mod.gpa.free(duped_values);
- return Value.fromInterned((try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = duped_values },
- } })));
+ } }));
},
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
- const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse
+ const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(pt)) orelse
return null;
if (union_obj.field_types.len == 0) {
- const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
+ const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
}
const only_field_ty = union_obj.field_types.get(ip)[0];
- const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse
+ const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(pt)) orelse
return null;
- const only = try mod.intern(.{ .un = .{
+ const only = try pt.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = tag_val.toIntern(),
.val = val_val.toIntern(),
@@ -2668,8 +2669,8 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;
- if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
- const only = try mod.intern(.{ .enum_tag = .{
+ if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(pt)) |int_opv| {
+ const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
} });
@@ -2679,18 +2680,18 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
return null;
},
.auto, .explicit => {
- if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
+ if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(pt)) return null;
switch (enum_type.names.len) {
0 => {
- const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
+ const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
},
1 => {
if (enum_type.values.len == 0) {
- const only = try mod.intern(.{ .enum_tag = .{
+ const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
- .int = try mod.intern(.{ .int = .{
+ .int = try pt.intern(.{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = 0 },
} }),
@@ -2733,13 +2734,14 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
/// During semantic analysis, instead call `Sema.typeRequiresComptime` which
/// resolves field types rather than asserting they are already resolved.
-pub fn comptimeOnly(ty: Type, mod: *Module) bool {
- return ty.comptimeOnlyAdvanced(mod, .normal) catch unreachable;
+pub fn comptimeOnly(ty: Type, pt: Zcu.PerThread) bool {
+ return ty.comptimeOnlyAdvanced(pt, .normal) catch unreachable;
}
/// `generic_poison` will return false.
/// May return false negatives when structs and unions are having their field types resolved.
-pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool {
+pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) SemaError!bool {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
return switch (ty.toIntern()) {
.empty_struct_type => false,
@@ -2749,19 +2751,19 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
.ptr_type => |ptr_type| {
const child_ty = Type.fromInterned(ptr_type.child);
switch (child_ty.zigTypeTag(mod)) {
- .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, strat),
+ .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(pt, strat),
.Opaque => return false,
- else => return child_ty.comptimeOnlyAdvanced(mod, strat),
+ else => return child_ty.comptimeOnlyAdvanced(pt, strat),
}
},
.anyframe_type => |child| {
if (child == .none) return false;
- return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat);
+ return Type.fromInterned(child).comptimeOnlyAdvanced(pt, strat);
},
- .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, strat),
- .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, strat),
- .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat),
- .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, strat),
+ .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(pt, strat),
+ .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(pt, strat),
+ .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(pt, strat),
+ .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(pt, strat),
.error_set_type,
.inferred_error_set_type,
@@ -2836,13 +2838,13 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
struct_type.flagsPtr(ip).requires_comptime = .wip;
errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown;
- try ty.resolveFields(mod);
+ try ty.resolveFields(pt);
for (0..struct_type.field_types.len) |i_usize| {
const i: u32 = @intCast(i_usize);
if (struct_type.fieldIsComptime(ip, i)) continue;
const field_ty = struct_type.field_types.get(ip)[i];
- if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) {
+ if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) {
// Note that this does not cause the layout to
// be considered resolved. Comptime-only types
// still maintain a layout of their
@@ -2861,7 +2863,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
.anon_struct_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
const have_comptime_val = val != .none;
- if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) return true;
+ if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) return true;
}
return false;
},
@@ -2880,11 +2882,11 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
union_type.flagsPtr(ip).requires_comptime = .wip;
errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
- try ty.resolveFields(mod);
+ try ty.resolveFields(pt);
for (0..union_type.field_types.len) |field_idx| {
const field_ty = union_type.field_types.get(ip)[field_idx];
- if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) {
+ if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) {
union_type.flagsPtr(ip).requires_comptime = .yes;
return true;
}
@@ -2898,7 +2900,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaErr
.opaque_type => false,
- .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, strat),
+ .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(pt, strat),
// values, not types
.undef,
@@ -2930,10 +2932,10 @@ pub fn isVector(ty: Type, mod: *const Module) bool {
}
/// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len.
-pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 {
- if (!ty.isVector(zcu)) return 0;
- const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type;
- return v.len * Type.fromInterned(v.child).bitSize(zcu);
+pub fn totalVectorBits(ty: Type, pt: Zcu.PerThread) u64 {
+ if (!ty.isVector(pt.zcu)) return 0;
+ const v = pt.zcu.intern_pool.indexToKey(ty.toIntern()).vector_type;
+ return v.len * Type.fromInterned(v.child).bitSize(pt);
}
pub fn isArrayOrVector(ty: Type, mod: *const Module) bool {
@@ -3013,23 +3015,25 @@ pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex {
}
// Works for vectors and vectors of integers.
-pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
- return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+pub fn minInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
+ const mod = pt.zcu;
+ const scalar = try minIntScalar(ty.scalarType(mod), pt, dest_ty.scalarType(mod));
+ return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
- } }))) else scalar;
+ } })) else scalar;
}
/// Asserts that the type is an integer.
-pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
+pub fn minIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
+ const mod = pt.zcu;
const info = ty.intInfo(mod);
- if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0);
- if (info.bits == 0) return mod.intValue(dest_ty, -1);
+ if (info.signedness == .unsigned) return pt.intValue(dest_ty, 0);
+ if (info.bits == 0) return pt.intValue(dest_ty, -1);
if (std.math.cast(u6, info.bits - 1)) |shift| {
const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
- return mod.intValue(dest_ty, n);
+ return pt.intValue(dest_ty, n);
}
var res = try std.math.big.int.Managed.init(mod.gpa);
@@ -3037,31 +3041,32 @@ pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
try res.setTwosCompIntLimit(.min, info.signedness, info.bits);
- return mod.intValue_big(dest_ty, res.toConst());
+ return pt.intValue_big(dest_ty, res.toConst());
}
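// Worked example of the shift fast path above (illustrative, not part of
// this diff): for `i8`, info.bits == 8, so shift == 7 and
// `@as(i64, std.math.minInt(i64)) >> (63 - 7)` == -128. Only widths above
// 64 bits reach the big-integer fallback.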
// Works for integers and vectors of integers.
/// The returned Value will have type dest_ty.
-pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
- return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+pub fn maxInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
+ const mod = pt.zcu;
+ const scalar = try maxIntScalar(ty.scalarType(mod), pt, dest_ty.scalarType(mod));
+ return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
- } }))) else scalar;
+ } })) else scalar;
}
/// The returned Value will have type dest_ty.
-pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const info = ty.intInfo(mod);
+pub fn maxIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
+ const info = ty.intInfo(pt.zcu);
switch (info.bits) {
0 => return switch (info.signedness) {
- .signed => try mod.intValue(dest_ty, -1),
- .unsigned => try mod.intValue(dest_ty, 0),
+ .signed => try pt.intValue(dest_ty, -1),
+ .unsigned => try pt.intValue(dest_ty, 0),
},
1 => return switch (info.signedness) {
- .signed => try mod.intValue(dest_ty, 0),
- .unsigned => try mod.intValue(dest_ty, 1),
+ .signed => try pt.intValue(dest_ty, 0),
+ .unsigned => try pt.intValue(dest_ty, 1),
},
else => {},
}
@@ -3069,20 +3074,20 @@ pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
.signed => {
const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
- return mod.intValue(dest_ty, n);
+ return pt.intValue(dest_ty, n);
},
.unsigned => {
const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
- return mod.intValue(dest_ty, n);
+ return pt.intValue(dest_ty, n);
},
};
- var res = try std.math.big.int.Managed.init(mod.gpa);
+ var res = try std.math.big.int.Managed.init(pt.zcu.gpa);
defer res.deinit();
try res.setTwosCompIntLimit(.max, info.signedness, info.bits);
- return mod.intValue_big(dest_ty, res.toConst());
+ return pt.intValue_big(dest_ty, res.toConst());
}
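// Worked example (illustrative, not part of this diff): for `u8`,
// shift == 7 and `@as(u64, std.math.maxInt(u64)) >> (63 - 7)` == 255; for
// `i8` the signed branch gives `maxInt(i64) >> 56` == 127. Only widths
// above 64 bits reach the big-integer fallback.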
/// Asserts the type is an enum or a union.
@@ -3188,26 +3193,26 @@ pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
};
}
-pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment {
- return ty.structFieldAlignAdvanced(index, zcu, .normal) catch unreachable;
+pub fn structFieldAlign(ty: Type, index: usize, pt: Zcu.PerThread) Alignment {
+ return ty.structFieldAlignAdvanced(index, pt, .normal) catch unreachable;
}
-pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, strat: ResolveStrat) !Alignment {
- const ip = &zcu.intern_pool;
+pub fn structFieldAlignAdvanced(ty: Type, index: usize, pt: Zcu.PerThread, strat: ResolveStrat) !Alignment {
+ const ip = &pt.zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.layout != .@"packed");
const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
- return zcu.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat);
+ return pt.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat);
},
.anon_struct_type => |anon_struct| {
- return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
+ return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(pt, strat.toLazy())).scalar;
},
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
- return zcu.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat);
+ return pt.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat);
},
else => unreachable,
}
@@ -3233,7 +3238,8 @@ pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
}
}
-pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
+pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Value {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
@@ -3242,13 +3248,13 @@ pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
assert(struct_type.haveFieldInits(ip));
return Value.fromInterned(struct_type.field_inits.get(ip)[index]);
} else {
- return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod);
+ return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(pt);
}
},
.anon_struct_type => |tuple| {
const val = tuple.values.get(ip)[index];
if (val == .none) {
- return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod);
+ return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(pt);
} else {
return Value.fromInterned(val);
}
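            // Illustrative, not part of this diff: `onePossibleValue`
            // returns non-null only for types with exactly one value
            // (e.g. `void`, `u0`), in which case the field is
            // comptime-known even without an explicit init.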
@@ -3272,7 +3278,8 @@ pub const FieldOffset = struct {
};
/// Supports structs and unions.
-pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
+pub fn structFieldOffset(ty: Type, index: usize, pt: Zcu.PerThread) u64 {
+ const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
@@ -3287,17 +3294,17 @@ pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
var big_align: Alignment = .none;
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
- if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) {
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) {
// comptime field
if (i == index) return offset;
continue;
}
- const field_align = Type.fromInterned(field_ty).abiAlignment(mod);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
big_align = big_align.max(field_align);
offset = field_align.forward(offset);
if (i == index) return offset;
- offset += Type.fromInterned(field_ty).abiSize(mod);
+ offset += Type.fromInterned(field_ty).abiSize(pt);
}
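            // Illustrative walk-through, not part of this diff: for the
            // tuple `struct { u8, u32 }`, index 1 is reached with
            // offset == 1, which forwards to the u32 alignment, giving 4.
            // Falling out of the loop instead pads to the largest field
            // alignment below.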
offset = big_align.max(.@"1").forward(offset);
return offset;
@@ -3307,7 +3314,7 @@ pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.hasTag(ip))
return 0;
- const layout = mod.getUnionLayout(union_type);
+ const layout = pt.getUnionLayout(union_type);
if (layout.tag_align.compare(.gte, layout.payload_align)) {
// {Tag, Payload}
return layout.payload_align.forward(layout.tag_size);
@@ -3421,12 +3428,13 @@ pub fn optEuBaseType(ty: Type, mod: *Module) Type {
};
}
-pub fn toUnsigned(ty: Type, mod: *Module) !Type {
+pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type {
+ const mod = pt.zcu;
return switch (ty.zigTypeTag(mod)) {
- .Int => mod.intType(.unsigned, ty.intInfo(mod).bits),
- .Vector => try mod.vectorType(.{
+ .Int => pt.intType(.unsigned, ty.intInfo(mod).bits),
+ .Vector => try pt.vectorType(.{
.len = ty.vectorLen(mod),
- .child = (try ty.childType(mod).toUnsigned(mod)).toIntern(),
+ .child = (try ty.childType(mod).toUnsigned(pt)).toIntern(),
}),
else => unreachable,
};
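// Illustrative, not part of this diff: `i32` maps to `u32`, and
// `@Vector(4, i16)` recurses through the child type to produce
// `@Vector(4, u16)`.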
@@ -3492,7 +3500,7 @@ pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
return .{ cur_ty, cur_len };
}
-pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) {
+pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, pt: Zcu.PerThread) union(enum) {
/// The result is a bit-pointer with the same value and a new packed offset.
bit_ptr: InternPool.Key.PtrType.PackedOffset,
/// The result is a standard pointer.
@@ -3505,6 +3513,7 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
} {
comptime assert(Type.packed_struct_layout_version == 2);
+ const zcu = pt.zcu;
const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
const field_ty = struct_ty.structFieldType(field_idx, zcu);
@@ -3515,7 +3524,7 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
if (i == field_idx) {
bit_offset = running_bits;
}
- running_bits += @intCast(f_ty.bitSize(zcu));
+ running_bits += @intCast(f_ty.bitSize(pt));
}
const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0)
@@ -3532,9 +3541,9 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
// targets before adding the necessary complications to this code. This will not
// cause miscompilations; it only means the field pointer uses bit masking when it
// might not be strictly necessary.
- if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
+ if (res_bit_offset % 8 == 0 and field_ty.bitSize(pt) == field_ty.abiSize(pt) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
const byte_offset = res_bit_offset / 8;
- const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?));
+ const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(pt).toByteUnits().?));
return .{ .byte_ptr = .{
.offset = byte_offset,
.alignment = new_align,
@@ -3547,34 +3556,35 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
} };
}
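// Worked example (illustrative, not part of this diff): in
// `packed struct { a: u3, b: u13 }` behind a plain pointer, field `b` has
// bit_offset == 3 and host_size == 2 bytes, so a bit-pointer results; a
// field whose bit offset is a multiple of 8 and whose bit size equals its
// ABI size in bits takes the little-endian byte-pointer fast path instead.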
-pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void {
+pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
- .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu),
+ .simple_type => |simple_type| return resolveSimpleType(simple_type, pt),
else => {},
}
switch (ty.zigTypeTag(zcu)) {
.Struct => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
- try field_ty.resolveLayout(zcu);
+ try field_ty.resolveLayout(pt);
},
- .struct_type => return ty.resolveStructInner(zcu, .layout),
+ .struct_type => return ty.resolveStructInner(pt, .layout),
else => unreachable,
},
- .Union => return ty.resolveUnionInner(zcu, .layout),
+ .Union => return ty.resolveUnionInner(pt, .layout),
.Array => {
if (ty.arrayLenIncludingSentinel(zcu) == 0) return;
const elem_ty = ty.childType(zcu);
- return elem_ty.resolveLayout(zcu);
+ return elem_ty.resolveLayout(pt);
},
.Optional => {
const payload_ty = ty.optionalChild(zcu);
- return payload_ty.resolveLayout(zcu);
+ return payload_ty.resolveLayout(pt);
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(zcu);
- return payload_ty.resolveLayout(zcu);
+ return payload_ty.resolveLayout(pt);
},
.Fn => {
const info = zcu.typeToFunc(ty).?;
@@ -3585,16 +3595,16 @@ pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void {
}
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
- try Type.fromInterned(param_ty).resolveLayout(zcu);
+ try Type.fromInterned(param_ty).resolveLayout(pt);
}
- try Type.fromInterned(info.return_type).resolveLayout(zcu);
+ try Type.fromInterned(info.return_type).resolveLayout(pt);
},
else => {},
}
}
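// Typical usage (an assumed sketch, not part of this diff): callers
// resolve the layout before querying sizes, e.g. `try ty.resolveLayout(pt);`
// followed by `ty.abiSize(pt)`.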
-pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void {
- const ip = &zcu.intern_pool;
+pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ const ip = &pt.zcu.intern_pool;
const ty_ip = ty.toIntern();
switch (ty_ip) {
@@ -3676,26 +3686,27 @@ pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void {
.empty_struct => unreachable,
.generic_poison => unreachable,
- else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) {
+ else => switch (ty_ip.unwrap(ip).getTag(ip)) {
.type_struct,
.type_struct_packed,
.type_struct_packed_inits,
- => return ty.resolveStructInner(zcu, .fields),
+ => return ty.resolveStructInner(pt, .fields),
- .type_union => return ty.resolveUnionInner(zcu, .fields),
+ .type_union => return ty.resolveUnionInner(pt, .fields),
- .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, zcu),
+ .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, pt),
else => {},
},
}
}
-pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void {
+pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
- .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu),
+ .simple_type => |simple_type| return resolveSimpleType(simple_type, pt),
else => {},
}
@@ -3719,52 +3730,53 @@ pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void {
.EnumLiteral,
=> {},
- .Pointer => return ty.childType(zcu).resolveFully(zcu),
- .Array => return ty.childType(zcu).resolveFully(zcu),
- .Optional => return ty.optionalChild(zcu).resolveFully(zcu),
- .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(zcu),
+ .Pointer => return ty.childType(zcu).resolveFully(pt),
+ .Array => return ty.childType(zcu).resolveFully(pt),
+ .Optional => return ty.optionalChild(zcu).resolveFully(pt),
+ .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(pt),
.Fn => {
const info = zcu.typeToFunc(ty).?;
if (info.is_generic) return;
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
- try Type.fromInterned(param_ty).resolveFully(zcu);
+ try Type.fromInterned(param_ty).resolveFully(pt);
}
- try Type.fromInterned(info.return_type).resolveFully(zcu);
+ try Type.fromInterned(info.return_type).resolveFully(pt);
},
.Struct => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
- try field_ty.resolveFully(zcu);
+ try field_ty.resolveFully(pt);
},
- .struct_type => return ty.resolveStructInner(zcu, .full),
+ .struct_type => return ty.resolveStructInner(pt, .full),
else => unreachable,
},
- .Union => return ty.resolveUnionInner(zcu, .full),
+ .Union => return ty.resolveUnionInner(pt, .full),
}
}
-pub fn resolveStructFieldInits(ty: Type, zcu: *Zcu) SemaError!void {
+pub fn resolveStructFieldInits(ty: Type, pt: Zcu.PerThread) SemaError!void {
// TODO: stop calling this for tuples!
- _ = zcu.typeToStruct(ty) orelse return;
- return ty.resolveStructInner(zcu, .inits);
+ _ = pt.zcu.typeToStruct(ty) orelse return;
+ return ty.resolveStructInner(pt, .inits);
}
-pub fn resolveStructAlignment(ty: Type, zcu: *Zcu) SemaError!void {
- return ty.resolveStructInner(zcu, .alignment);
+pub fn resolveStructAlignment(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ return ty.resolveStructInner(pt, .alignment);
}
-pub fn resolveUnionAlignment(ty: Type, zcu: *Zcu) SemaError!void {
- return ty.resolveUnionInner(zcu, .alignment);
+pub fn resolveUnionAlignment(ty: Type, pt: Zcu.PerThread) SemaError!void {
+ return ty.resolveUnionInner(pt, .alignment);
}
/// `ty` must be a struct.
fn resolveStructInner(
ty: Type,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
resolution: enum { fields, inits, alignment, layout, full },
) SemaError!void {
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
const struct_obj = zcu.typeToStruct(ty).?;
@@ -3777,7 +3789,7 @@ fn resolveStructInner(
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
- .mod = zcu,
+ .pt = pt,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.code = undefined, // This ZIR will not be used.
@@ -3804,9 +3816,10 @@ fn resolveStructInner(
/// `ty` must be a union.
fn resolveUnionInner(
ty: Type,
- zcu: *Zcu,
+ pt: Zcu.PerThread,
resolution: enum { fields, alignment, layout, full },
) SemaError!void {
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
const union_obj = zcu.typeToUnion(ty).?;
@@ -3819,7 +3832,7 @@ fn resolveUnionInner(
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
- .mod = zcu,
+ .pt = pt,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.code = undefined, // This ZIR will not be used.
@@ -3845,7 +3858,7 @@ fn resolveUnionInner(
/// Fully resolves a simple type. This is usually a nop, but for builtin types with
/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
/// resolve the type.
-fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Error!void {
+fn resolveSimpleType(simple_type: InternPool.SimpleType, pt: Zcu.PerThread) Allocator.Error!void {
const builtin_type_name: []const u8 = switch (simple_type) {
.atomic_order => "AtomicOrder",
.atomic_rmw_op => "AtomicRmwOp",
@@ -3861,7 +3874,7 @@ fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Er
else => return,
};
// This will fully resolve the type.
- _ = try zcu.getBuiltinType(builtin_type_name);
+ _ = try pt.getBuiltinType(builtin_type_name);
}
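// Example (illustrative, not part of this diff): `.atomic_order` resolves
// by analyzing `std.builtin.AtomicOrder` through
// `pt.getBuiltinType("AtomicOrder")`; simple types without a special
// InternPool index hit the `else => return` above and are a no-op.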
/// Returns the type of a pointer to an element.
@@ -3874,7 +3887,8 @@ fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Er
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `Sema.analyzePtrArithmetic`.
/// May perform type resolution and return a transitive `error.AnalysisFail`.
-pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
+pub fn elemPtrType(ptr_ty: Type, offset: ?usize, pt: Zcu.PerThread) !Type {
+ const zcu = pt.zcu;
const ptr_info = ptr_ty.ptrInfo(zcu);
const elem_ty = ptr_ty.elemType2(zcu);
const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
@@ -3887,14 +3901,14 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
alignment: Alignment = .none,
vector_index: VI = .none,
} = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .One) blk: {
- const elem_bits = elem_ty.bitSize(zcu);
+ const elem_bits = elem_ty.bitSize(pt);
if (elem_bits == 0) break :blk .{};
const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
if (!is_packed) break :blk .{};
break :blk .{
.host_size = @intCast(parent_ty.arrayLen(zcu)),
- .alignment = parent_ty.abiAlignment(zcu),
+ .alignment = parent_ty.abiAlignment(pt),
.vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
};
} else .{};
@@ -3908,7 +3922,7 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
}
// If the addend is not a comptime-known value we can still count on
// it being a multiple of the type size.
- const elem_size = (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar;
+ const elem_size = (try elem_ty.abiSizeAdvanced(pt, .sema)).scalar;
const addend = if (offset) |off| elem_size * off else elem_size;
    // The resulting pointer is aligned to the greatest common divisor of the offset (an
@@ -3921,7 +3935,7 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
assert(new_align != .none);
break :a new_align;
};
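    // Numeric illustration, not part of this diff: with a parent pointer
    // aligned to 8 and elem_size == 12, every step moves the address by a
    // multiple of 12, so the new alignment is gcd(8, 12) == 4, the largest
    // power of two dividing both.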
- return zcu.ptrTypeSema(.{
+ return pt.ptrTypeSema(.{
.child = elem_ty.toIntern(),
.flags = .{
.alignment = alignment,
@@ -3944,6 +3958,7 @@ pub const @"u16": Type = .{ .ip_index = .u16_type };
pub const @"u29": Type = .{ .ip_index = .u29_type };
pub const @"u32": Type = .{ .ip_index = .u32_type };
pub const @"u64": Type = .{ .ip_index = .u64_type };
+pub const @"u80": Type = .{ .ip_index = .u80_type };
pub const @"u128": Type = .{ .ip_index = .u128_type };
pub const @"i8": Type = .{ .ip_index = .i8_type };