| | | |
|---|---|---|
| author | mlugg <mlugg@mlugg.co.uk> | 2024-07-04 05:00:32 +0100 |
| committer | mlugg <mlugg@mlugg.co.uk> | 2024-07-04 21:01:42 +0100 |
| commit | 0e5335aaf5e0ac646fbd46a319710019d10c2971 | |
| tree | bb5a2e4184a64985f8e8788f0d11b867213aaf3b /src/Sema.zig | |
| parent | 2f0f1efa6fa50ca27a44d5f7a0c38a6cafbbfb7c | |
compiler: rework type resolution, fully resolve all types
I'm so sorry.
This commit was just meant to make all types fully resolve by queueing
resolution at the moment of their creation. Unfortunately, a lot of
dominoes ended up falling. Here's what happened:
* I added a work queue job to fully resolve a type.
* I realised that from here we could eliminate `Sema.types_to_resolve`
if we made function codegen a separate job. This is desirable for
simplicity of both spec and implementation.
* This led to a new AIR traversal to detect whether any required type is
unresolved. If a type in the AIR failed to resolve, then we can't run
codegen.
* Because full type resolution now happens via the work queue job, a bug
was exposed whereby error messages for type resolution were associated
with the wrong `Decl`, resulting in duplicate error messages when the
type was also resolved "by" its owner `Decl` (which is really where
*all* resolution should happen).
* A correct fix for this requires using a different `Sema` when
performing type resolution: we need a `Sema` owned by the type. Also
note that this fix is necessary for incremental compilation.
* This means a whole bunch of functions no longer need to take `Sema`s.
* First-order effects: `resolveTypeFields`, `resolveTypeLayout`, etc
* Second-order effects: `Type.abiAlignmentAdvanced`, `Value.orderAgainstZeroAdvanced`, etc
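To make the call-site shape concrete, here is a before/after excerpt of the
pattern (taken from the `setupErrorReturnTrace` hunk in the diff below):

```zig
// Before: type resolution routed through the current `Sema`.
const stack_trace_ty = try sema.getBuiltinType("StackTrace");
try sema.resolveTypeFields(stack_trace_ty);

// After: the type (and module) own the operation; no `Sema` needed.
const stack_trace_ty = try mod.getBuiltinType("StackTrace");
try stack_trace_ty.resolveFields(mod);
```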
The end result of this is, in short, a more correct compiler and a
simpler language specification. This regressed a few error notes in the
test cases, but nothing that seems worth blocking this change.
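As a rough illustration of the new flow, here is a minimal, self-contained
sketch of the queueing pattern. The `Job` and `Ty` types are hypothetical
stand-ins for the real `Compilation` work queue and `InternPool` types; only
the shape is taken from this commit: enqueue `resolve_type_fully` when a type
is created, and let codegen run as a separate job that expects every type to
already be resolved.

```zig
const std = @import("std");

// Hypothetical stand-ins for compiler structures, not the compiler's API.
const TypeIndex = usize;

const Job = union(enum) {
    /// Fully resolve a type before any machine-code generation runs.
    resolve_type_fully: TypeIndex,
    /// Codegen for a function body, now a separate queue job.
    codegen_func: usize,
};

const Ty = struct {
    fields_resolved: bool = false,
    layout_resolved: bool = false,

    /// Resolution is a method on the type itself, not on a borrowed `Sema`.
    fn resolveFully(ty: *Ty) void {
        ty.fields_resolved = true;
        ty.layout_resolved = true;
    }
};

pub fn main() void {
    var types = [_]Ty{ .{}, .{} };

    // Jobs are enqueued the moment each type is created (mirroring
    // `work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index })`),
    // with function codegen queued separately afterwards.
    const jobs = [_]Job{
        .{ .resolve_type_fully = 0 },
        .{ .resolve_type_fully = 1 },
        .{ .codegen_func = 0 },
    };

    for (jobs) |job| {
        switch (job) {
            .resolve_type_fully => |i| types[i].resolveFully(),
            .codegen_func => {
                // Codegen may only run once every required type is resolved.
                for (types) |ty| std.debug.assert(ty.fields_resolved and ty.layout_resolved);
            },
        }
    }
}
```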
Oh, also, I ripped out the old code in `test/src/Cases.zig` which
introduced a dependency on `Compilation`. This dependency was
problematic at best, and this code has been unused for a while. When we
re-enable incremental test cases, we must rewrite their executor to use
the compiler server protocol.
Diffstat (limited to 'src/Sema.zig')
-rw-r--r--  src/Sema.zig | 1458
1 file changed, 430 insertions, 1028 deletions
diff --git a/src/Sema.zig b/src/Sema.zig index 57b2c897a1..9dfbc724eb 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -64,14 +64,6 @@ generic_owner: InternPool.Index = .none, /// instantiation can point back to the instantiation site in addition to the /// declaration site. generic_call_src: LazySrcLoc = LazySrcLoc.unneeded, -/// The key is types that must be fully resolved prior to machine code -/// generation pass. Types are added to this set when resolving them -/// immediately could cause a dependency loop, but they do need to be resolved -/// before machine code generation passes process the AIR. -/// It would work fine if this were an array list instead of an array hash map. -/// I chose array hash map with the intention to save time by omitting -/// duplicates. -types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, /// These are lazily created runtime blocks from block_inline instructions. /// They are created when an break_inline passes through a runtime condition, because /// Sema must convert comptime control flow to runtime control flow, which means @@ -872,7 +864,6 @@ pub fn deinit(sema: *Sema) void { sema.air_extra.deinit(gpa); sema.inst_map.deinit(gpa); sema.decl_val_table.deinit(gpa); - sema.types_to_resolve.deinit(gpa); { var it = sema.post_hoc_blocks.iterator(); while (it.next()) |entry| { @@ -2078,8 +2069,8 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty)); // var st: StackTrace = undefined; - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; @@ -2628,7 +2619,7 @@ fn analyzeAsInt( const mod = sema.mod; const coerced = try sema.coerce(block, dest_ty, air_ref, src); const val = try sema.resolveConstDefinedValue(block, src, coerced, reason); - return (try val.getUnsignedIntAdvanced(mod, sema)).?; + return (try val.getUnsignedIntAdvanced(mod, .sema)).?; } /// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`, @@ -2832,6 +2823,7 @@ fn zirStructDecl( } try mod.finalizeAnonDecl(new_decl_index); + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); } @@ -3332,7 +3324,7 @@ fn zirUnionDecl( } try mod.finalizeAnonDecl(new_decl_index); - + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); } @@ -3457,12 +3449,12 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { defer tracy.end(); if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) { - try sema.resolveTypeFields(sema.fn_ret_ty); + try sema.fn_ret_ty.resolveFields(sema.mod); return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none); } const target = sema.mod.getTarget(); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try sema.mod.ptrTypeSema(.{ .child = sema.fn_ret_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -3471,7 +3463,6 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { // We are inlining a function call; this should be emitted as an 
alloc, not a ret_ptr. // TODO when functions gain result location support, the inlining struct in // Block should contain the return pointer, and we would pass that through here. - try sema.queueFullTypeResolution(sema.fn_ret_ty); return block.addTy(.alloc, ptr_type); } @@ -3667,8 +3658,8 @@ fn zirAllocExtended( try sema.validateVarType(block, ty_src, var_ty, false); } const target = sema.mod.getTarget(); - try sema.resolveTypeLayout(var_ty); - const ptr_type = try sema.ptrType(.{ + try var_ty.resolveLayout(sema.mod); + const ptr_type = try sema.mod.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .alignment = alignment, @@ -3902,7 +3893,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, const idx_val = (try sema.resolveValue(data.rhs)).?; break :blk .{ data.lhs, - .{ .elem = try idx_val.toUnsignedIntAdvanced(sema) }, + .{ .elem = try idx_val.toUnsignedIntSema(zcu) }, }; }, .bitcast => .{ @@ -3940,7 +3931,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, .val = payload_val.toIntern(), } }); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(opt_val), opt_ty); - break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(sema)).toIntern(); + break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(zcu)).toIntern(); }, .eu_payload => ptr: { // Set the error union to non-error at comptime. @@ -3953,7 +3944,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, .val = .{ .payload = payload_val.toIntern() }, } }); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(eu_val), eu_ty); - break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(sema)).toIntern(); + break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(zcu)).toIntern(); }, .field => |idx| ptr: { const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu); @@ -3967,9 +3958,9 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, const store_val = try zcu.unionValue(maybe_union_ty, tag_val, payload_val); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty); } - break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, sema)).toIntern(); + break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, zcu)).toIntern(); }, - .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, sema)).toIntern(), + .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, zcu)).toIntern(), }; try ptr_mapping.put(air_ptr, new_ptr); } @@ -4060,7 +4051,7 @@ fn finishResolveComptimeKnownAllocPtr( fn makePtrTyConst(sema: *Sema, ptr_ty: Type) CompileError!Type { var ptr_info = ptr_ty.ptrInfo(sema.mod); ptr_info.flags.is_const = true; - return sema.ptrType(ptr_info); + return sema.mod.ptrTypeSema(ptr_info); } fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref { @@ -4103,11 +4094,10 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I return sema.analyzeComptimeAlloc(block, var_ty, .none); } const target = sema.mod.getTarget(); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try sema.mod.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); - try sema.queueFullTypeResolution(var_ty); const ptr = try 
block.addTy(.alloc, ptr_type); const ptr_inst = ptr.toIndex().?; try sema.maybe_comptime_allocs.put(sema.gpa, ptr_inst, .{ .runtime_index = block.runtime_index }); @@ -4127,11 +4117,10 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } try sema.validateVarType(block, ty_src, var_ty, false); const target = sema.mod.getTarget(); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try sema.mod.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); - try sema.queueFullTypeResolution(var_ty); return block.addTy(.alloc, ptr_type); } @@ -4227,7 +4216,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_vals, .none); - const final_ptr_ty = try sema.ptrType(.{ + const final_ptr_ty = try mod.ptrTypeSema(.{ .child = final_elem_ty.toIntern(), .flags = .{ .alignment = ia1.alignment, @@ -4247,7 +4236,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Unless the block is comptime, `alloc_inferred` always produces // a runtime constant. The final inferred type needs to be // fully resolved so it can be lowered in codegen. - try sema.resolveTypeFully(final_elem_ty); + try final_elem_ty.resolveFully(mod); return; } @@ -4259,8 +4248,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(mod)}); } - try sema.queueFullTypeResolution(final_elem_ty); - // Change it to a normal alloc. sema.air_instructions.set(@intFromEnum(ptr_inst), .{ .tag = .alloc, @@ -4633,7 +4620,7 @@ fn validateArrayInitTy( return; }, .Struct => if (ty.isTuple(mod)) { - try sema.resolveTypeFields(ty); + try ty.resolveFields(mod); const array_len = ty.arrayLen(mod); if (init_count > array_len) { return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{ @@ -4911,7 +4898,7 @@ fn validateStructInit( if (block.is_comptime and (try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null) { - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveLayout(mod); // In this case the only thing we need to do is evaluate the implicit // store instructions for default field values, and report any missing fields. // Avoid the cost of the extra machinery for detecting a comptime struct init value. @@ -4919,7 +4906,7 @@ fn validateStructInit( const i: u32 = @intCast(i_usize); if (field_ptr != .none) continue; - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.toIntern() == .unreachable_value) { const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse { @@ -4968,7 +4955,7 @@ fn validateStructInit( const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); // We collect the comptime field values in case the struct initialization // ends up being comptime-known. 
@@ -5127,7 +5114,7 @@ fn validateStructInit( try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store); return; } - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveLayout(mod); // Our task is to insert `store` instructions for all the default field values. for (found_fields, 0..) |field_ptr, i| { @@ -5172,7 +5159,7 @@ fn zirValidatePtrArrayInit( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - try sema.resolveStructFieldInits(array_ty); + try array_ty.resolveStructFieldInits(mod); var i = instrs.len; while (i < array_len) : (i += 1) { const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern(); @@ -5241,7 +5228,7 @@ fn zirValidatePtrArrayInit( if (array_ty.isTuple(mod)) { if (array_ty.structFieldIsComptime(i, mod)) - try sema.resolveStructFieldInits(array_ty); + try array_ty.resolveStructFieldInits(mod); if (try array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv.toIntern(); continue; @@ -5581,7 +5568,7 @@ fn storeToInferredAllocComptime( .needed_comptime_reason = "value being stored to a comptime variable must be comptime-known", }); }; - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try zcu.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .alignment = iac.alignment, @@ -5688,7 +5675,7 @@ fn anonDeclRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref { fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index { const mod = sema.mod; - const ptr_ty = (try sema.ptrType(.{ + const ptr_ty = (try mod.ptrTypeSema(.{ .child = mod.intern_pool.typeOf(val), .flags = .{ .alignment = .none, @@ -6645,8 +6632,6 @@ fn addDbgVar( // real `block` instruction. if (block.need_debug_scope) |ptr| ptr.* = true; - try sema.queueFullTypeResolution(operand_ty); - // Add the name to the AIR. const name_extra_index: u32 = @intCast(sema.air_extra.items.len); const elements_used = name.len / 4 + 1; @@ -6832,14 +6817,8 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref if (!block.ownerModule().error_tracing) return .none; - const stack_trace_ty = sema.getBuiltinType("StackTrace") catch |err| switch (err) { - error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, - else => |e| return e, - }; - sema.resolveTypeFields(stack_trace_ty) catch |err| switch (err) { - error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, - else => |e| return e, - }; + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"), @@ -6879,8 +6858,8 @@ fn popErrorReturnTrace( // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or // the result is comptime-known to be a non-error. Either way, pop unconditionally. 
- const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); @@ -6905,8 +6884,8 @@ fn popErrorReturnTrace( defer then_block.instructions.deinit(gpa); // If non-error, then pop the error return trace by restoring the index. - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); @@ -7032,8 +7011,8 @@ fn zirCall( // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only // need to clean-up our own trace if we were passed to a non-error-handling expression. if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) { - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls); const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); @@ -7264,10 +7243,6 @@ const CallArgsInfo = union(enum) { ) CompileError!Air.Inst.Ref { const mod = sema.mod; const param_count = func_ty_info.param_types.len; - if (maybe_param_ty) |param_ty| switch (param_ty.toIntern()) { - .generic_poison_type => {}, - else => try sema.queueFullTypeResolution(param_ty), - }; const uncoerced_arg: Air.Inst.Ref = switch (cai) { inline .resolved, .call_builtin => |resolved| resolved.args[arg_index], .zir_call => |zir_call| arg_val: { @@ -7494,24 +7469,19 @@ fn analyzeCall( const gpa = sema.gpa; - var is_generic_call = func_ty_info.is_generic; + const is_generic_call = func_ty_info.is_generic; var is_comptime_call = block.is_comptime or modifier == .compile_time; var is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .Inline; var comptime_reason: ?*const Block.ComptimeReason = null; if (!is_inline_call and !is_comptime_call) { - if (sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) |ct| { - is_comptime_call = ct; - is_inline_call = ct; - if (ct) { - comptime_reason = &.{ .comptime_ret_ty = .{ - .func = func, - .func_src = func_src, - .return_ty = Type.fromInterned(func_ty_info.return_type), - } }; - } - } else |err| switch (err) { - error.GenericPoison => is_generic_call = true, - else => |e| return e, + if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) { + is_comptime_call = true; + is_inline_call = true; + comptime_reason = &.{ .comptime_ret_ty = .{ + .func = func, + .func_src = func_src, + .return_ty = Type.fromInterned(func_ty_info.return_type), + } }; } } @@ -7871,7 +7841,6 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type)); 
if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(mod)) { ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true; } @@ -8281,7 +8250,6 @@ fn instantiateGenericCall( } } else { // The parameter is runtime-known. - try sema.queueFullTypeResolution(arg_ty); child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{ .tag = .arg, .data = .{ .arg = .{ @@ -8330,8 +8298,6 @@ fn instantiateGenericCall( return error.GenericPoison; } - try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type)); - if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); if (sema.owner_func_index != .none and @@ -8423,7 +8389,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil else => |e| return e, }; const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod); - try sema.resolveTypeFields(indexable_ty); + try indexable_ty.resolveFields(mod); assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction if (indexable_ty.zigTypeTag(mod) == .Struct) { const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod); @@ -8687,7 +8653,7 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const operand = try sema.coerce(block, err_int_ty, uncasted_operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { - const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntAdvanced(sema)); + const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(mod)); if (int > mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); return Air.internedToRef((try mod.intern(.{ .err = .{ @@ -8791,7 +8757,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) { .Enum => operand, .Union => blk: { - try sema.resolveTypeFields(operand_ty); + try operand_ty.resolveFields(mod); const tag_ty = operand_ty.unionTagType(mod) orelse { return sema.fail( block, @@ -8933,7 +8899,7 @@ fn analyzeOptionalPayloadPtr( } const child_type = opt_type.optionalChild(zcu); - const child_pointer = try sema.ptrType(.{ + const child_pointer = try zcu.ptrTypeSema(.{ .child = child_type.toIntern(), .flags = .{ .is_const = optional_ptr_ty.isConstPtr(zcu), @@ -8957,13 +8923,13 @@ fn analyzeOptionalPayloadPtr( const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr); } - return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern()); + return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { if (val.isNull(zcu)) { return sema.fail(block, src, "unable to unwrap null", .{}); } - return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern()); + return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern()); } } @@ -9006,7 +8972,7 @@ fn zirOptionalPayload( // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; const ptr_info = operand_ty.ptrInfo(mod); - break :t try sema.ptrType(.{ + break :t try mod.ptrTypeSema(.{ .child = ptr_info.child, .flags = .{ .alignment = ptr_info.flags.alignment, @@ -9124,7 +9090,7 @@ fn analyzeErrUnionPayloadPtr( const err_union_ty = operand_ty.childType(zcu); const 
payload_ty = err_union_ty.errorUnionPayload(zcu); - const operand_pointer_ty = try sema.ptrType(.{ + const operand_pointer_ty = try zcu.ptrTypeSema(.{ .child = payload_ty.toIntern(), .flags = .{ .is_const = operand_ty.isConstPtr(zcu), @@ -9149,13 +9115,13 @@ fn analyzeErrUnionPayloadPtr( const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr); } - return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern()); + return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { if (val.getErrorName(zcu).unwrap()) |name| { return sema.failWithComptimeErrorRetTrace(block, src, name); } - return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern()); + return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern()); } } @@ -9603,17 +9569,8 @@ fn funcCommon( } } - var ret_ty_requires_comptime = false; - const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: { - ret_ty_requires_comptime = ret_comptime; - break :rp bare_return_type.isGenericPoison(); - } else |err| switch (err) { - error.GenericPoison => rp: { - is_generic = true; - break :rp true; - }, - else => |e| return e, - }; + const ret_ty_requires_comptime = try sema.typeRequiresComptime(bare_return_type); + const ret_poison = bare_return_type.isGenericPoison(); const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime; const param_types = block.params.items(.ty); @@ -9961,8 +9918,8 @@ fn finishFunc( if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) { // Make sure that StackTrace's fields are resolved so that the backend can // lower this fn type. - const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(unresolved_stack_trace_ty); + const unresolved_stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try unresolved_stack_trace_ty.resolveFields(mod); } return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty); @@ -10021,21 +9978,7 @@ fn zirParam( } }; - const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) { - error.GenericPoison => { - // The type is not available until the generic instantiation. - // We result the param instruction with a poison value and - // insert an anytype parameter. - try block.params.append(sema.arena, .{ - .ty = .generic_poison_type, - .is_comptime = comptime_syntax, - .name = param_name, - }); - sema.inst_map.putAssumeCapacity(inst, .generic_poison); - return; - }, - else => |e| return e, - } or comptime_syntax; + const is_comptime = try sema.typeRequiresComptime(param_ty) or comptime_syntax; try block.params.append(sema.arena, .{ .ty = param_ty.toIntern(), @@ -10162,7 +10105,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! } return Air.internedToRef((try zcu.intValue( Type.usize, - (try operand_val.getUnsignedIntAdvanced(zcu, sema)).?, + (try operand_val.getUnsignedIntAdvanced(zcu, .sema)).?, )).toIntern()); } const len = operand_ty.vectorLen(zcu); @@ -10174,7 +10117,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! new_elem.* = (try zcu.undefValue(Type.usize)).toIntern(); continue; } - const addr = try ptr_val.getUnsignedIntAdvanced(zcu, sema) orelse { + const addr = try ptr_val.getUnsignedIntAdvanced(zcu, .sema) orelse { // A vector element wasn't an integer pointer. 
This is a runtime operation. break :ct; }; @@ -11047,7 +10990,7 @@ const SwitchProngAnalysis = struct { const union_obj = zcu.typeToUnion(operand_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); if (capture_byref) { - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try zcu.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !operand_ptr_ty.ptrIsMutable(zcu), @@ -11056,7 +10999,7 @@ const SwitchProngAnalysis = struct { }, }); if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| { - return Air.internedToRef((try union_ptr.ptrField(field_index, sema)).toIntern()); + return Air.internedToRef((try union_ptr.ptrField(field_index, zcu)).toIntern()); } return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty); } else { @@ -11150,7 +11093,7 @@ const SwitchProngAnalysis = struct { const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len); for (field_indices, dummy_captures) |field_idx, *dummy| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); - const field_ptr_ty = try sema.ptrType(.{ + const field_ptr_ty = try zcu.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = operand_ptr_info.flags.is_const, @@ -11186,7 +11129,7 @@ const SwitchProngAnalysis = struct { if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| { if (op_ptr_val.isUndef(zcu)) return zcu.undefRef(capture_ptr_ty); - const field_ptr_val = try op_ptr_val.ptrField(first_field_index, sema); + const field_ptr_val = try op_ptr_val.ptrField(first_field_index, zcu); return Air.internedToRef((try zcu.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern()); } @@ -11399,7 +11342,7 @@ fn switchCond( }, .Union => { - try sema.resolveTypeFields(operand_ty); + try operand_ty.resolveFields(mod); const enum_ty = operand_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(src, "switch on union with no attached enum", .{}); @@ -13691,7 +13634,7 @@ fn maybeErrorUnwrap( return true; } - const panic_fn = try sema.getBuiltin("panicUnwrapError"); + const panic_fn = try mod.getBuiltin("panicUnwrapError"); const err_return_trace = try sema.getErrorReturnTrace(block); const args: [2]Air.Inst.Ref = .{ err_return_trace, operand }; try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check"); @@ -13701,7 +13644,7 @@ fn maybeErrorUnwrap( const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const msg_inst = try sema.resolveInst(inst_data.operand); - const panic_fn = try sema.getBuiltin("panic"); + const panic_fn = try mod.getBuiltin("panic"); const err_return_trace = try sema.getErrorReturnTrace(block); const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value }; try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check"); @@ -13766,7 +13709,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, .{ .needed_comptime_reason = "field name must be comptime-known", }); - try sema.resolveTypeFields(ty); + try ty.resolveFields(mod); const ip = &mod.intern_pool; const has_field = hf: { @@ -13946,7 +13889,7 @@ fn zirShl( return mod.undefRef(sema.typeOf(lhs)); } // If rhs is 0, return lhs without doing any calculations. 
- if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) { @@ -14111,7 +14054,7 @@ fn zirShr( return mod.undefRef(lhs_ty); } // If rhs is 0, return lhs without doing any calculations. - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) { @@ -14158,7 +14101,7 @@ fn zirShr( if (air_tag == .shr_exact) { // Detect if any ones would be shifted out. const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod); - if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) { + if (!(try truncated.compareAllWithZeroSema(.eq, mod))) { return sema.fail(block, src, "exact shift shifted out 1 bits", .{}); } } @@ -14582,12 +14525,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.requireRuntimeBlock(block, src, runtime_src); if (ptr_addrspace) |ptr_as| { - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = resolved_elem_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); @@ -14670,7 +14613,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins .none => null, else => Value.fromInterned(ptr_info.sentinel), }, - .len = try val.sliceLen(sema), + .len = try val.sliceLen(mod), }; }, .One => { @@ -14912,12 +14855,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (ptr_addrspace) |ptr_as| { - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = lhs_info.elem_type.toIntern(), .flags = .{ .address_space = ptr_as }, }); @@ -15105,7 +15048,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .Int, .ComptimeInt, .ComptimeFloat => { if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15120,7 +15063,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly @@ -15241,7 +15184,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } else { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try 
mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15256,7 +15199,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly @@ -15408,7 +15351,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15423,7 +15366,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly @@ -15518,7 +15461,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15533,7 +15476,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } } @@ -15758,7 +15701,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15777,18 +15720,18 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val); // If this answer could possibly be different by doing `intMod`, // we must emit a compile error. Otherwise, it's OK. - if (!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) and - !(try rem_result.compareAllWithZeroAdvanced(.eq, sema))) + if (!(try lhs_val.compareAllWithZeroSema(.gte, mod)) and + !(try rem_result.compareAllWithZeroSema(.eq, mod))) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } @@ -15806,14 +15749,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) { + if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, mod))) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern()); @@ -15864,8 +15807,8 @@ fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileErr // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs_q = try sema.arena.alloc( math.big.Limb, lhs_bigint.limbs.len, @@ -15941,7 +15884,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } if (maybe_lhs_val) |lhs_val| { @@ -15957,7 +15900,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } } @@ -16036,7 +15979,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } if (maybe_lhs_val) |lhs_val| { @@ -16052,7 +15995,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } } @@ -16139,12 +16082,12 @@ fn zirOverflowArithmetic( // to the result, even if it is undefined.. // Otherwise, if either of the argument is undefined, undefined is returned. 
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } @@ -16165,7 +16108,7 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; - } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + } else if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { @@ -16184,7 +16127,7 @@ fn zirOverflowArithmetic( const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1); if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; @@ -16194,7 +16137,7 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod)) { - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; @@ -16218,12 +16161,12 @@ fn zirOverflowArithmetic( // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred. // Oterhwise if either of the arguments is undefined, both results are undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } @@ -16374,7 +16317,7 @@ fn analyzeArithmetic( // overflow (max_int), causing illegal behavior. // For floats: either operand being undef makes the result undef. 
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { return casted_rhs; } } @@ -16386,7 +16329,7 @@ fn analyzeArithmetic( return mod.undefRef(resolved_type); } } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16418,7 +16361,7 @@ fn analyzeArithmetic( // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { return casted_rhs; } } @@ -16426,7 +16369,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -16439,7 +16382,7 @@ fn analyzeArithmetic( // If either of the operands are zero, then the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { return casted_rhs; } } @@ -16447,7 +16390,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -16488,7 +16431,7 @@ fn analyzeArithmetic( return mod.undefRef(resolved_type); } } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16523,7 +16466,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16544,7 +16487,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16591,7 +16534,7 @@ fn analyzeArithmetic( if (lhs_val.isNan(mod)) { return Air.internedToRef(lhs_val.toIntern()); } - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) lz: { if (maybe_rhs_val) |rhs_val| { if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); @@ -16622,7 +16565,7 @@ fn analyzeArithmetic( if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) rz: { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isInf(mod)) { return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern()); @@ -16674,7 +16617,7 @@ fn analyzeArithmetic( }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, 
scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16687,7 +16630,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16719,7 +16662,7 @@ fn analyzeArithmetic( }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16732,7 +16675,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16828,7 +16771,7 @@ fn analyzePtrArithmetic( const new_ptr_ty = t: { // Calculate the new pointer alignment. - // This code is duplicated in `elemPtrType`. + // This code is duplicated in `Type.elemPtrType`. if (ptr_info.flags.alignment == .none) { // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness. break :t ptr_ty; @@ -16837,7 +16780,7 @@ fn analyzePtrArithmetic( // it being a multiple of the type size. const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const addend = if (opt_off_val) |off_val| a: { - const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntAdvanced(sema)); + const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(mod)); break :a elem_size * off_int; } else elem_size; @@ -16850,7 +16793,7 @@ fn analyzePtrArithmetic( )); assert(new_align != .none); - break :t try sema.ptrType(.{ + break :t try mod.ptrTypeSema(.{ .child = ptr_info.child, .sentinel = ptr_info.sentinel, .flags = .{ @@ -16869,14 +16812,14 @@ fn analyzePtrArithmetic( if (opt_off_val) |offset_val| { if (ptr_val.isUndef(mod)) return mod.undefRef(new_ptr_ty); - const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntAdvanced(sema)); + const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(mod)); if (offset_int == 0) return ptr; if (air_tag == .ptr_sub) { const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } else { - const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, sema), new_ptr_ty); + const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, mod), new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } } else break :rs offset_src; @@ -16975,7 +16918,6 @@ fn zirAsm( // Indicate the output is the asm instruction return value. 
arg.* = .none; const out_ty = try sema.resolveType(block, ret_ty_src, output.data.operand); - try sema.queueFullTypeResolution(out_ty); expr_ty = Air.internedToRef(out_ty.toIntern()); } else { arg.* = try sema.resolveInst(output.data.operand); @@ -17010,7 +16952,6 @@ fn zirAsm( .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src), else => { arg.* = uncasted_arg; - try sema.queueFullTypeResolution(uncasted_arg_ty); }, } @@ -17169,7 +17110,7 @@ fn analyzeCmpUnionTag( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const union_ty = sema.typeOf(un); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); @@ -17385,9 +17326,6 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. => {}, } const val = try ty.lazyAbiSize(mod); - if (val.isLazySize(mod)) { - try sema.queueFullTypeResolution(ty); - } return Air.internedToRef(val.toIntern()); } @@ -17427,7 +17365,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .AnyFrame, => {}, } - const bit_size = try operand_ty.bitSizeAdvanced(mod, sema); + const bit_size = try operand_ty.bitSizeAdvanced(mod, .sema); return mod.intRef(Type.comptime_int, bit_size); } @@ -17613,7 +17551,7 @@ fn zirBuiltinSrc( } }); }; - const src_loc_ty = try sema.getBuiltinType("SourceLocation"); + const src_loc_ty = try mod.getBuiltinType("SourceLocation"); const fields = .{ // file: [:0]const u8, file_name_val, @@ -17637,7 +17575,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty = try sema.resolveType(block, src, inst_data.operand); - const type_info_ty = try sema.getBuiltinType("Type"); + const type_info_ty = try mod.getBuiltinType("Type"); const type_info_tag_ty = type_info_ty.unionTagType(mod).?; if (ty.typeDeclInst(mod)) |type_decl_inst| { @@ -17718,7 +17656,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = new_decl_ty.toIntern(), .storage = .{ .elems = param_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = param_info_ty.toIntern(), .flags = .{ .size = .Slice, @@ -17748,7 +17686,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai func_ty_info.return_type, } }); - const callconv_ty = try sema.getBuiltinType("CallingConvention"); + const callconv_ty = try mod.getBuiltinType("CallingConvention"); const field_values = .{ // calling_convention: CallingConvention, @@ -17782,7 +17720,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const int_info_decl = mod.declPtr(int_info_decl_index); const int_info_ty = int_info_decl.val.toType(); - const signedness_ty = try sema.getBuiltinType("Signedness"); + const signedness_ty = try mod.getBuiltinType("Signedness"); const info = ty.intInfo(mod); const field_values = .{ // signedness: Signedness, @@ -17830,12 +17768,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try Type.fromInterned(info.child).lazyAbiAlignment(mod); - const addrspace_ty = try sema.getBuiltinType("AddressSpace"); + const addrspace_ty = try mod.getBuiltinType("AddressSpace"); const 
pointer_ty = t: { const decl_index = (try sema.namespaceLookup( block, src, - (try sema.getBuiltinType("Type")).getNamespaceIndex(mod), + (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "Pointer", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -17984,8 +17922,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t set_field_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(error_field_ty); - // Build our list of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise @@ -18036,7 +17972,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; // Build our ?[]const Error value - const slice_errors_ty = try sema.ptrType(.{ + const slice_errors_ty = try mod.ptrTypeSema(.{ .child = error_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18182,7 +18118,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = fields_array_ty.toIntern(), .storage = .{ .elems = enum_field_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = enum_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18262,7 +18198,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t union_field_ty_decl.val.toType(); }; - try sema.resolveTypeLayout(ty); // Getting alignment requires type layout + try ty.resolveLayout(mod); // Getting alignment requires type layout const union_obj = mod.typeToUnion(ty).?; const tag_type = union_obj.loadTagType(ip); const layout = union_obj.getLayout(ip); @@ -18298,7 +18234,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; const alignment = switch (layout) { - .auto, .@"extern" => try sema.unionFieldAlignment(union_obj, @intCast(field_index)), + .auto, .@"extern" => try mod.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema), .@"packed" => .none, }; @@ -18326,7 +18262,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = array_fields_ty.toIntern(), .storage = .{ .elems = union_field_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = union_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18359,7 +18295,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decl_index = (try sema.namespaceLookup( block, src, - (try sema.getBuiltinType("Type")).getNamespaceIndex(mod), + (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -18412,7 +18348,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t struct_field_ty_decl.val.toType(); }; - try sema.resolveTypeLayout(ty); // Getting alignment requires type layout + try ty.resolveLayout(mod); // Getting alignment requires type layout var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); @@ -18452,7 +18388,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); }; - try sema.resolveTypeLayout(Type.fromInterned(field_ty)); + try Type.fromInterned(field_ty).resolveLayout(mod); const is_comptime = field_val != .none; const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null; @@ -18481,7 +18417,7 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len); - try sema.resolveStructFieldInits(ty); + try ty.resolveStructFieldInits(mod); for (struct_field_vals, 0..) |*field_val, field_index| { const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name| @@ -18520,10 +18456,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const default_val_ptr = try sema.optRefValue(opt_default_val); const alignment = switch (struct_type.layout) { .@"packed" => .none, - else => try sema.structFieldAlignment( + else => try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, + .sema, ), }; @@ -18555,7 +18492,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = array_fields_ty.toIntern(), .storage = .{ .elems = struct_field_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = struct_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18591,7 +18528,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decl_index = (try sema.namespaceLookup( block, src, - (try sema.getBuiltinType("Type")).getNamespaceIndex(mod), + (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -18635,7 +18572,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t type_opaque_ty_decl.val.toType(); }; - try sema.resolveTypeFields(ty); + try ty.resolveFields(mod); const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod)); const field_values = .{ @@ -18677,7 +18614,6 @@ fn typeInfoDecls( const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); break :t declaration_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(declaration_ty); var decl_vals = std.ArrayList(InternPool.Index).init(gpa); defer decl_vals.deinit(); @@ -18695,7 +18631,7 @@ fn typeInfoDecls( .ty = array_decl_ty.toIntern(), .storage = .{ .elems = decl_vals.items }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = declaration_ty.toIntern(), .flags = .{ .size = .Slice, @@ -19295,7 +19231,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand_ty = sema.typeOf(operand); const ptr_info = operand_ty.ptrInfo(mod); - const res_ty = try sema.ptrType(.{ + const res_ty = try mod.ptrTypeSema(.{ .child = err_union_ty.errorUnionPayload(mod).toIntern(), .flags = .{ .is_const = ptr_info.flags.is_const, @@ -19528,11 +19464,11 @@ fn retWithErrTracing( else => true, }; const gpa = sema.gpa; - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const return_err_fn = try sema.getBuiltin("returnError"); + const return_err_fn = try mod.getBuiltin("returnError"); const args: [1]Air.Inst.Ref = .{err_return_trace}; if (!need_check) { @@ -19735,7 +19671,7 @@ fn analyzeRet( return sema.failWithOwnedErrorMsg(block, msg); } - try sema.resolveTypeLayout(sema.fn_ret_ty); + 
try sema.fn_ret_ty.resolveLayout(mod); try sema.validateRuntimeValue(block, operand_src, operand); @@ -19817,7 +19753,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, else => {}, } - const align_bytes = (try val.getUnsignedIntAdvanced(mod, sema)).?; + const align_bytes = (try val.getUnsignedIntAdvanced(mod, .sema)).?; break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes); } else .none; @@ -19851,7 +19787,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air elem_ty.fmt(mod), bit_offset, bit_offset - host_size * 8, host_size, }); } - const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, sema); + const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, .sema); if (elem_bit_size > host_size * 8 - bit_offset) { return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{ elem_ty.fmt(mod), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size, @@ -19892,7 +19828,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }); } - const ty = try sema.ptrType(.{ + const ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = sentinel, .flags = .{ @@ -19983,7 +19919,7 @@ fn structInitEmpty( const mod = sema.mod; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); // The init values to use for the struct instance. const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); @@ -20054,7 +19990,6 @@ fn unionInit( try sema.requireRuntimeBlock(block, init_src, null); _ = union_ty_src; - try sema.queueFullTypeResolution(union_ty); return block.addUnionInit(union_ty, field_index, init); } @@ -20083,7 +20018,7 @@ fn zirStructInit( else => |e| return e, }; const resolved_ty = result_ty.optEuBaseType(mod); - try sema.resolveTypeLayout(resolved_ty); + try resolved_ty.resolveLayout(mod); if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. 
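Note on the hunks above (analyzeCmpUnionTag, zirBitSizeOf, retWithErrTracing, analyzeRet, zirStructInit): they share one call-shape change. Type resolution becomes a method on the type that takes the module handle, builtin type lookup moves from `Sema` onto the module, and the `*Advanced` query helpers take a `.sema` resolution-strategy tag instead of a `*Sema` pointer. The sketch below condenses that shape from the "-"/"+" lines above; it is illustrative only, not an additional hunk, and assumes the surrounding function context (`sema`, `mod`, `union_ty`, `operand_ty`).

    // Old shape (the "-" lines): resolution and lookups route through the Sema instance.
    try sema.resolveTypeFields(union_ty);
    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
    const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);

    // New shape (the "+" lines): the type resolves itself against the module,
    // builtins come from the module, and the query takes the .sema strategy tag.
    try union_ty.resolveFields(mod);
    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
    const bit_size = try operand_ty.bitSizeAdvanced(mod, .sema);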
@@ -20124,7 +20059,7 @@ fn zirStructInit( const field_ty = resolved_ty.structFieldType(field_index, mod); field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src); if (!is_packed) { - try sema.resolveStructFieldInits(resolved_ty); + try resolved_ty.resolveStructFieldInits(mod); if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveValue(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, .{ @@ -20197,7 +20132,7 @@ fn zirStructInit( if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20211,7 +20146,6 @@ fn zirStructInit( } try sema.requireRuntimeBlock(block, src, null); - try sema.queueFullTypeResolution(resolved_ty); const union_val = try block.addUnionInit(resolved_ty, field_index, init_inst); return sema.coerce(block, result_ty, union_val, src); } @@ -20288,7 +20222,7 @@ fn finishStructInit( continue; } - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const field_init = struct_type.fieldInit(ip, i); if (field_init == .none) { @@ -20358,9 +20292,9 @@ fn finishStructInit( } if (is_ref) { - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveLayout(mod); const target = sema.mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20380,8 +20314,7 @@ fn finishStructInit( .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(runtime_index), } })); - try sema.resolveStructFieldInits(struct_ty); - try sema.queueFullTypeResolution(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const struct_val = try block.addAggregateInit(struct_ty, field_inits); return sema.coerce(block, result_ty, struct_val, init_src); } @@ -20490,7 +20423,7 @@ fn structInitAnon( if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20504,7 +20437,7 @@ fn structInitAnon( }; extra_index = item.end; - const field_ptr_ty = try sema.ptrType(.{ + const field_ptr_ty = try mod.ptrTypeSema(.{ .child = field_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20597,7 +20530,7 @@ fn zirArrayInit( dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src); if (is_tuple) { if (array_ty.structFieldIsComptime(i, mod)) - try sema.resolveStructFieldInits(array_ty); + try array_ty.resolveStructFieldInits(mod); if (try array_ty.structFieldValueComptime(mod, i)) |field_val| { const init_val = try sema.resolveValue(dest.*) orelse { return sema.failWithNeededComptime(block, elem_src, .{ @@ -20641,11 +20574,10 @@ fn zirArrayInit( .init_node_offset = src.offset.node_offset.x, .elem_index = runtime_index, } })); - try sema.queueFullTypeResolution(array_ty); if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20654,7 +20586,7 @@ fn zirArrayInit( if (is_tuple) { for 
(resolved_args, 0..) |arg, i| { - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = array_ty.structFieldType(i, mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20667,7 +20599,7 @@ fn zirArrayInit( return sema.makePtrConst(block, alloc); } - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = array_ty.elemType2(mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20755,14 +20687,14 @@ fn arrayInitAnon( if (is_ref) { const target = sema.mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); for (operands, 0..) |operand, i_usize| { const i: u32 = @intCast(i_usize); - const field_ptr_ty = try sema.ptrType(.{ + const field_ptr_ty = try mod.ptrTypeSema(.{ .child = types[i], .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20832,7 +20764,7 @@ fn fieldType( const ip = &mod.intern_pool; var cur_ty = aggregate_ty; while (true) { - try sema.resolveTypeFields(cur_ty); + try cur_ty.resolveFields(mod); switch (cur_ty.zigTypeTag(mod)) { .Struct => switch (ip.indexToKey(cur_ty.toIntern())) { .anon_struct_type => |anon_struct| { @@ -20883,8 +20815,8 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { const mod = sema.mod; const ip = &mod.intern_pool; - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern()); @@ -20918,9 +20850,6 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } const val = try ty.lazyAbiAlignment(mod); - if (val.isLazyAlign(mod)) { - try sema.queueFullTypeResolution(ty); - } return Air.internedToRef(val.toIntern()); } @@ -21095,7 +21024,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeLayout(operand_ty); + try operand_ty.resolveLayout(mod); const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined); @@ -21171,7 +21100,7 @@ fn zirReify( }, }, }; - const type_info_ty = try sema.getBuiltinType("Type"); + const type_info_ty = try mod.getBuiltinType("Type"); const uncasted_operand = try sema.resolveInst(extra.operand); const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstDefinedValue(block, operand_src, type_info, .{ @@ -21205,7 +21134,7 @@ fn zirReify( ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); - const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema)); + const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod)); const ty = try mod.intType(signedness, bits); return Air.internedToRef(ty.toIntern()); }, @@ 
-21220,7 +21149,7 @@ fn zirReify( try ip.getOrPutString(gpa, "child", .no_embedded_nulls), ).?); - const len: u32 = @intCast(try len_val.toUnsignedIntAdvanced(sema)); + const len: u32 = @intCast(try len_val.toUnsignedIntSema(mod)); const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); @@ -21238,7 +21167,7 @@ fn zirReify( try ip.getOrPutString(gpa, "bits", .no_embedded_nulls), ).?); - const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema)); + const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -21288,7 +21217,7 @@ fn zirReify( return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?; + const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, .sema)).?; if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) { return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int}); } @@ -21296,7 +21225,7 @@ fn zirReify( const elem_ty = child_val.toType(); if (abi_align != .none) { - try sema.resolveTypeLayout(elem_ty); + try elem_ty.resolveLayout(mod); } const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); @@ -21340,7 +21269,7 @@ fn zirReify( } } - const ty = try sema.ptrType(.{ + const ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = actual_sentinel, .flags = .{ @@ -21369,7 +21298,7 @@ fn zirReify( try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls), ).?); - const len = try len_val.toUnsignedIntAdvanced(sema); + const len = try len_val.toUnsignedIntSema(mod); const child_ty = child_val.toType(); const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { const ptr_ty = try mod.singleMutPtrType(child_ty); @@ -21476,7 +21405,7 @@ fn zirReify( const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); // Decls - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified structs must have no decls", .{}); } @@ -21509,7 +21438,7 @@ fn zirReify( try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls), ).?); - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified enums must have no decls", .{}); } @@ -21527,7 +21456,7 @@ fn zirReify( ).?); // Decls - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified opaque must have no decls", .{}); } @@ -21575,7 +21504,7 @@ fn zirReify( try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), ).?); - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified unions must have no decls", .{}); } const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -21934,7 +21863,7 @@ fn reifyUnion( field_ty.* = field_type_val.toIntern(); if (any_aligns) { - const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntAdvanced(sema); + const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); @@ -21979,7 +21908,7 @@ fn reifyUnion( field_ty.* = field_type_val.toIntern(); if (any_aligns) { - const byte_align = try (try field_info.fieldValue(mod, 
2)).toUnsignedIntAdvanced(sema); + const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); @@ -22036,6 +21965,7 @@ fn reifyUnion( loaded_union.flagsPtr(ip).status = .have_field_types; try mod.finalizeAnonDecl(new_decl_index); + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } @@ -22109,7 +22039,7 @@ fn reifyStruct( if (field_is_comptime) any_comptime_fields = true; if (field_default_value != .none) any_default_inits = true; - switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, sema)) { + switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, .sema)) { .eq => {}, .gt => any_aligned_fields = true, .lt => unreachable, @@ -22192,7 +22122,7 @@ fn reifyStruct( return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const byte_align = try field_alignment_val.toUnsignedIntAdvanced(sema); + const byte_align = try field_alignment_val.toUnsignedIntSema(mod); if (byte_align == 0) { if (layout != .@"packed") { struct_type.field_aligns.get(ip)[field_idx] = .none; @@ -22278,7 +22208,7 @@ fn reifyStruct( var fields_bit_sum: u64 = 0; for (0..struct_type.field_types.len) |field_idx| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]); - sema.resolveTypeLayout(field_ty) catch |err| switch (err) { + field_ty.resolveLayout(mod) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; try sema.errNote(src, msg, "while checking a field of this struct", .{}); @@ -22300,11 +22230,12 @@ fn reifyStruct( } try mod.finalizeAnonDecl(new_decl_index); + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref { - const va_list_ty = try sema.getBuiltinType("VaList"); + const va_list_ty = try sema.mod.getBuiltinType("VaList"); const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty); const inst = try sema.resolveInst(zir_ref); @@ -22343,7 +22274,7 @@ fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) const va_list_src = block.builtinCallArgSrc(extra.node, 0); const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand); - const va_list_ty = try sema.getBuiltinType("VaList"); + const va_list_ty = try sema.mod.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref); @@ -22363,7 +22294,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const src = block.nodeOffset(@bitCast(extended.operand)); - const va_list_ty = try sema.getBuiltinType("VaList"); + const va_list_ty = try sema.mod.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addInst(.{ .tag = .c_va_start, @@ -22497,7 +22428,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro _ = try sema.checkIntType(block, operand_src, operand_scalar_ty); if (try sema.resolveValue(operand)) |operand_val| { - const result_val = try 
operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, sema); + const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, .sema); return Air.internedToRef(result_val.toIntern()); } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) { return sema.failWithNeededComptime(block, operand_src, .{ @@ -22545,7 +22476,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.checkPtrType(block, src, ptr_ty, true); const elem_ty = ptr_ty.elemType2(mod); - const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); + const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, .sema); if (ptr_ty.isSlice(mod)) { const msg = msg: { @@ -22644,7 +22575,7 @@ fn ptrFromIntVal( } return sema.failWithUseOfUndef(block, operand_src); } - const addr = try operand_val.toUnsignedIntAdvanced(sema); + const addr = try operand_val.toUnsignedIntSema(zcu); if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(zcu)}); if (addr != 0 and ptr_align != .none and !ptr_align.check(addr)) @@ -22842,8 +22773,8 @@ fn ptrCastFull( const src_info = operand_ty.ptrInfo(mod); const dest_info = dest_ty.ptrInfo(mod); - try sema.resolveTypeLayout(Type.fromInterned(src_info.child)); - try sema.resolveTypeLayout(Type.fromInterned(dest_info.child)); + try Type.fromInterned(src_info.child).resolveLayout(mod); + try Type.fromInterned(dest_info.child).resolveLayout(mod); const src_slice_like = src_info.flags.size == .Slice or (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array); @@ -23091,7 +23022,7 @@ fn ptrCastFull( // Only convert to a many-pointer at first var info = dest_info; info.flags.size = .Many; - const ty = try sema.ptrType(info); + const ty = try mod.ptrTypeSema(info); if (dest_ty.zigTypeTag(mod) == .Optional) { break :blk try mod.optionalType(ty.toIntern()); } else { @@ -23109,7 +23040,7 @@ fn ptrCastFull( return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); } if (dest_align.compare(.gt, src_align)) { - if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| { + if (try ptr_val.getUnsignedIntAdvanced(mod, .sema)) |addr| { if (!dest_align.check(addr)) { return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, @@ -23176,7 +23107,7 @@ fn ptrCastFull( // We can't change address spaces with a bitcast, so this requires two instructions var intermediate_info = src_info; intermediate_info.flags.address_space = dest_info.flags.address_space; - const intermediate_ptr_ty = try sema.ptrType(intermediate_info); + const intermediate_ptr_ty = try mod.ptrTypeSema(intermediate_info); const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: { break :blk try mod.optionalType(intermediate_ptr_ty.toIntern()); } else intermediate_ptr_ty; @@ -23233,7 +23164,7 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst if (flags.volatile_cast) ptr_info.flags.is_volatile = false; const dest_ty = blk: { - const dest_ty = try sema.ptrType(ptr_info); + const dest_ty = try mod.ptrTypeSema(ptr_info); if (operand_ty.zigTypeTag(mod) == .Optional) { break :blk try mod.optionalType(dest_ty.toIntern()); } @@ -23523,7 +23454,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeLayout(ty); + try 
ty.resolveLayout(mod); switch (ty.zigTypeTag(mod)) { .Struct => {}, else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}), @@ -23766,7 +23697,7 @@ fn checkAtomicPtrOperand( const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { .Pointer => ptr_ty.ptrInfo(mod), else => { - const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data); + const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data); _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); unreachable; }, @@ -23776,7 +23707,7 @@ fn checkAtomicPtrOperand( wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero; wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile; - const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data); + const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data); const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); return casted_ptr; @@ -23953,7 +23884,7 @@ fn resolveExportOptions( const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; - const export_options_ty = try sema.getBuiltinType("ExportOptions"); + const export_options_ty = try mod.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); @@ -24017,7 +23948,7 @@ fn resolveBuiltinEnum( reason: NeededComptimeReason, ) CompileError!@field(std.builtin, name) { const mod = sema.mod; - const ty = try sema.getBuiltinType(name); + const ty = try mod.getBuiltinType(name); const air_ref = try sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, ty, air_ref, src); const val = try sema.resolveConstDefinedValue(block, src, coerced, reason); @@ -24777,7 +24708,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data; const func = try sema.resolveInst(extra.callee); - const modifier_ty = try sema.getBuiltinType("CallModifier"); + const modifier_ty = try mod.getBuiltinType("CallModifier"); const air_ref = try sema.resolveInst(extra.modifier); const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src); const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{ @@ -24881,7 +24812,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins .Struct, .Union => {}, else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(zcu)}), } - try sema.resolveTypeLayout(parent_ty); + try parent_ty.resolveLayout(zcu); const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{ .needed_comptime_reason = "field name must be comptime-known", @@ -24912,7 +24843,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins var actual_parent_ptr_info: InternPool.Key.PtrType = .{ .child = parent_ty.toIntern(), .flags = .{ - .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema), + .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, @@ -24924,7 +24855,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins var actual_field_ptr_info: InternPool.Key.PtrType = .{ .child = field_ty.toIntern(), .flags = .{ - .alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, sema), + .alignment = try 
field_ptr_ty.ptrAlignmentAdvanced(zcu, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, @@ -24935,12 +24866,13 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins switch (parent_ty.containerLayout(zcu)) { .auto => { actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict( - if (zcu.typeToStruct(parent_ty)) |struct_obj| try sema.structFieldAlignment( + if (zcu.typeToStruct(parent_ty)) |struct_obj| try zcu.structFieldAlignmentAdvanced( struct_obj.fieldAlign(ip, field_index), field_ty, struct_obj.layout, + .sema, ) else if (zcu.typeToUnion(parent_ty)) |union_obj| - try sema.unionFieldAlignment(union_obj, field_index) + try zcu.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema) else actual_field_ptr_info.flags.alignment, ); @@ -24970,9 +24902,9 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins }, } - const actual_field_ptr_ty = try sema.ptrType(actual_field_ptr_info); + const actual_field_ptr_ty = try zcu.ptrTypeSema(actual_field_ptr_info); const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src); - const actual_parent_ptr_ty = try sema.ptrType(actual_parent_ptr_info); + const actual_parent_ptr_ty = try zcu.ptrTypeSema(actual_parent_ptr_info); const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: { switch (parent_ty.zigTypeTag(zcu)) { @@ -25032,7 +24964,6 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src); } else result: { try sema.requireRuntimeBlock(block, inst_src, field_ptr_src); - try sema.queueFullTypeResolution(parent_ty); break :result try block.addInst(.{ .tag = .field_parent_ptr, .data = .{ .ty_pl = .{ @@ -25345,7 +25276,7 @@ fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !A // Already an array pointer. return ptr; } - const new_ty = try sema.ptrType(.{ + const new_ty = try mod.ptrTypeSema(.{ .child = (try mod.arrayType(.{ .len = len, .sentinel = info.sentinel, @@ -25444,7 +25375,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { - const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; + const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); for (0..len) |i| { const elem_index = try mod.intRef(Type.usize, i); @@ -25503,7 +25434,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void var new_dest_ptr = dest_ptr; var new_src_ptr = src_ptr; if (len_val) |val| { - const len = try val.toUnsignedIntAdvanced(sema); + const len = try val.toUnsignedIntSema(mod); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. 
return; @@ -25550,7 +25481,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void assert(dest_manyptr_ty_key.flags.size == .One); dest_manyptr_ty_key.child = dest_elem_ty.toIntern(); dest_manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src); } else new_dest_ptr; const new_src_ptr_ty = sema.typeOf(new_src_ptr); @@ -25561,7 +25492,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void assert(src_manyptr_ty_key.flags.size == .One); src_manyptr_ty_key.child = src_elem_ty.toIntern(); src_manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(src_manyptr_ty_key), new_src_ptr, src_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src); } else new_src_ptr; // ok1: dest >= src + len @@ -25628,7 +25559,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src; const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; - const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?; + const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. 
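Note on the init and reify hunks above (zirStructInit, finishStructInit, zirArrayInit, zirReify, reifyStruct, zirMemcpy): pointer types built during semantic analysis now come from `mod.ptrTypeSema` instead of `sema.ptrType`, comptime-known integers are read with `toUnsignedIntSema(mod)` instead of `toUnsignedIntAdvanced(sema)`, and the `sema.queueFullTypeResolution` calls at use sites are dropped in favor of a `resolve_type_fully` work-queue item written when the type is created. The sketch below is illustrative only, not an additional hunk; its lines are taken from different hunks above and assume their own surrounding context (`result_ty`, `target`, `val`, `array_ty`, `wip_ty`).

    // Old shape (the "-" lines):
    const alloc_ty = try sema.ptrType(.{
        .child = result_ty.toIntern(),
        .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
    });
    const len = try val.toUnsignedIntAdvanced(sema);
    try sema.queueFullTypeResolution(array_ty);

    // New shape (the "+" lines): the pointer type is interned via the module,
    // the integer is read with the Sema-strategy helper, and full resolution is
    // requested once, at type creation, through the compilation work queue
    // (wip_ty is the in-progress type from the reifyStruct hunk).
    const alloc_ty = try mod.ptrTypeSema(.{
        .child = result_ty.toIntern(),
        .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
    });
    const len = try val.toUnsignedIntSema(mod);
    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });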
@@ -25808,7 +25739,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntAdvanced(sema)); + const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(mod)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else if (extra.data.bits.has_align_ref) blk: { @@ -25828,7 +25759,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A error.GenericPoison => break :blk null, else => |e| return e, }; - const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntAdvanced(sema)); + const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(mod)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else .none; @@ -25904,7 +25835,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; - const cc_ty = try sema.getBuiltinType("CallingConvention"); + const cc_ty = try mod.getBuiltinType("CallingConvention"); const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{ .needed_comptime_reason = "calling convention must be comptime-known", }); @@ -26117,7 +26048,7 @@ fn resolvePrefetchOptions( const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; - const options_ty = try sema.getBuiltinType("PrefetchOptions"); + const options_ty = try mod.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); const rw_src = block.src(.{ .init_field_rw = src.offset.node_offset_builtin_call_arg.builtin_call_node }); @@ -26141,7 +26072,7 @@ fn resolvePrefetchOptions( return std.builtin.PrefetchOptions{ .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), - .locality = @intCast(try locality_val.toUnsignedIntAdvanced(sema)), + .locality = @intCast(try locality_val.toUnsignedIntSema(mod)), .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -26189,7 +26120,7 @@ fn resolveExternOptions( const gpa = sema.gpa; const ip = &mod.intern_pool; const options_inst = try sema.resolveInst(zir_ref); - const extern_options_ty = try sema.getBuiltinType("ExternOptions"); + const extern_options_ty = try mod.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); const name_src = block.src(.{ .init_field_name = src.offset.node_offset_builtin_call_arg.builtin_call_node }); @@ -26440,7 +26371,7 @@ fn explainWhyTypeIsComptime( var type_set = TypeSet{}; defer type_set.deinit(sema.gpa); - try sema.resolveTypeFully(ty); + try ty.resolveFully(sema.mod); return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set); } @@ -26567,7 +26498,7 @@ const ExternPosition = enum { /// Returns true if `ty` is allowed in extern types. /// Does *NOT* require `ty` to be resolved in any way. -/// Calls `resolveTypeLayout` for packed containers. +/// Calls `resolveLayout` for packed containers. 
fn validateExternType( sema: *Sema, ty: Type, @@ -26618,7 +26549,7 @@ fn validateExternType( .Struct, .Union => switch (ty.containerLayout(mod)) { .@"extern" => return true, .@"packed" => { - const bit_size = try ty.bitSizeAdvanced(mod, sema); + const bit_size = try ty.bitSizeAdvanced(mod, .sema); switch (bit_size) { 0, 8, 16, 32, 64, 128 => return true, else => return false, @@ -26796,11 +26727,11 @@ fn explainWhyTypeIsNotPacked( } } -fn prepareSimplePanic(sema: *Sema, block: *Block) !void { +fn prepareSimplePanic(sema: *Sema) !void { const mod = sema.mod; if (mod.panic_func_index == .none) { - const decl_index = (try sema.getBuiltinDecl(block, "panic")); + const decl_index = (try mod.getBuiltinDecl("panic")); // decl_index may be an alias; we must find the decl that actually // owns the function. try sema.ensureDeclAnalyzed(decl_index); @@ -26813,10 +26744,10 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void { } if (mod.null_stack_trace == .none) { - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const target = mod.getTarget(); - const ptr_stack_trace_ty = try sema.ptrType(.{ + const ptr_stack_trace_ty = try mod.ptrTypeSema(.{ .child = stack_trace_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .global_constant), @@ -26838,9 +26769,9 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP const gpa = sema.gpa; if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x; - try sema.prepareSimplePanic(block); + try sema.prepareSimplePanic(); - const panic_messages_ty = try sema.getBuiltinType("panic_messages"); + const panic_messages_ty = try mod.getBuiltinType("panic_messages"); const msg_decl_index = (sema.namespaceLookup( block, LazySrcLoc.unneeded, @@ -26946,7 +26877,7 @@ fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst. 
return; } - try sema.prepareSimplePanic(block); + try sema.prepareSimplePanic(); const panic_func = mod.funcInfo(mod.panic_func_index); const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl); @@ -26992,7 +26923,7 @@ fn panicUnwrapError( if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.getBuiltin("panicUnwrapError"); + const panic_fn = try sema.mod.getBuiltin("panicUnwrapError"); const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand); const err_return_trace = try sema.getErrorReturnTrace(&fail_block); const args: [2]Air.Inst.Ref = .{ err_return_trace, err }; @@ -27051,7 +26982,7 @@ fn panicSentinelMismatch( const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { - const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null); + const elem_ptr_ty = try ptr_ty.elemPtrType(null, mod); const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty); break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; @@ -27069,7 +27000,7 @@ fn panicSentinelMismatch( } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { - const panic_fn = try sema.getBuiltin("checkNonScalarSentinel"); + const panic_fn = try mod.getBuiltin("checkNonScalarSentinel"); const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel }; try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check"); return; @@ -27108,7 +27039,7 @@ fn safetyCheckFormatted( if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.getBuiltin(func); + const panic_fn = try sema.mod.getBuiltin(func); try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check"); } try sema.addSafetyCheckExtra(parent_block, ok, &fail_block); @@ -27170,7 +27101,7 @@ fn fieldVal( return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27267,7 +27198,7 @@ fn fieldVal( if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try sema.resolveTypeFields(child_type); + try child_type.resolveFields(mod); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index: u32 = @intCast(field_index_usize); @@ -27361,7 +27292,7 @@ fn fieldPtr( return anonDeclRef(sema, int_val.toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const new_ptr_ty = try sema.ptrType(.{ + const new_ptr_ty = try mod.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (object_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27376,7 +27307,7 @@ fn fieldPtr( .packed_offset = ptr_info.packed_offset, }); const ptr_ptr_info = object_ptr_ty.ptrInfo(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = new_ptr_ty.toIntern(), 
.sentinel = if (object_ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27410,7 +27341,7 @@ fn fieldPtr( if (field_name.eqlSlice("ptr", ip)) { const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = slice_ptr_ty.toIntern(), .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27420,7 +27351,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, sema)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, mod)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27428,7 +27359,7 @@ fn fieldPtr( try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr); return field_ptr; } else if (field_name.eqlSlice("len", ip)) { - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = .usize_type, .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27438,7 +27369,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_len_index, sema)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_len_index, mod)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27506,7 +27437,7 @@ fn fieldPtr( if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try sema.resolveTypeFields(child_type); + try child_type.resolveFields(mod); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const field_index_u32: u32 = @intCast(field_index); @@ -27601,7 +27532,7 @@ fn fieldCallBind( find_field: { switch (concrete_ty.zigTypeTag(mod)) { .Struct => { - try sema.resolveTypeFields(concrete_ty); + try concrete_ty.resolveFields(mod); if (mod.typeToStruct(concrete_ty)) |struct_type| { const field_index = struct_type.nameIndex(ip, field_name) orelse break :find_field; @@ -27627,7 +27558,7 @@ fn fieldCallBind( } }, .Union => { - try sema.resolveTypeFields(concrete_ty); + try concrete_ty.resolveFields(mod); const union_obj = mod.typeToUnion(concrete_ty).?; _ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field; const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false); @@ -27737,7 +27668,7 @@ fn finishFieldCallBind( object_ptr: Air.Inst.Ref, ) CompileError!ResolvedFieldCallee { const mod = sema.mod; - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !ptr_ty.ptrIsMutable(mod), @@ -27748,14 +27679,14 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { if (container_ty.structFieldIsComptime(field_index, mod)) { - try sema.resolveStructFieldInits(container_ty); + try container_ty.resolveStructFieldInits(mod); const default_val = (try container_ty.structFieldValueComptime(mod, field_index)).?; return .{ .direct = Air.internedToRef(default_val.toIntern()) }; } } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const ptr_val = try struct_ptr_val.ptrField(field_index, sema); + const ptr_val = try struct_ptr_val.ptrField(field_index, mod); const pointer = Air.internedToRef(ptr_val.toIntern()); return .{ .direct = try 
sema.analyzeLoad(block, src, pointer, src) }; } @@ -27831,8 +27762,8 @@ fn structFieldPtr( const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try sema.resolveTypeFields(struct_ty); - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveFields(mod); + try struct_ty.resolveLayout(mod); if (struct_ty.isTuple(mod)) { if (field_name.eqlSlice("len", ip)) { @@ -27871,7 +27802,7 @@ fn structFieldPtrByIndex( } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - const val = try struct_ptr_val.ptrField(field_index, sema); + const val = try struct_ptr_val.ptrField(field_index, mod); return Air.internedToRef(val.toIntern()); } @@ -27915,10 +27846,11 @@ fn structFieldPtrByIndex( @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset))); } else { // Our alignment is capped at the field alignment. - const field_align = try sema.structFieldAlignment( + const field_align = try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), Type.fromInterned(field_ty), struct_type.layout, + .sema, ); ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none) field_align @@ -27926,10 +27858,10 @@ fn structFieldPtrByIndex( field_align.min(parent_align); } - const ptr_field_ty = try sema.ptrType(ptr_ty_data); + const ptr_field_ty = try mod.ptrTypeSema(ptr_ty_data); if (struct_type.fieldIsComptime(ip, field_index)) { - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const val = try mod.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .base_addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] }, @@ -27955,7 +27887,7 @@ fn structFieldVal( const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); switch (ip.indexToKey(struct_ty.toIntern())) { .struct_type => { @@ -27966,7 +27898,7 @@ fn structFieldVal( const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); if (struct_type.fieldIsComptime(ip, field_index)) { - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]); } @@ -27983,7 +27915,7 @@ fn structFieldVal( } try sema.requireRuntimeBlock(block, src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(struct_byval, field_index, field_ty); }, .anon_struct_type => |anon_struct| { @@ -28050,7 +27982,7 @@ fn tupleFieldValByIndex( const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); } @@ -28071,7 +28003,7 @@ fn tupleFieldValByIndex( } try sema.requireRuntimeBlock(block, src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(tuple_byval, field_index, field_ty); } @@ -28092,11 +28024,11 @@ fn unionFieldPtr( const union_ptr_ty = sema.typeOf(union_ptr); const union_ptr_info = union_ptr_ty.ptrInfo(mod); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_obj = mod.typeToUnion(union_ty).?; const field_index = try 
sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = union_ptr_info.flags.is_const, @@ -28107,7 +28039,7 @@ fn unionFieldPtr( union_ptr_info.flags.alignment else try sema.typeAbiAlignment(union_ty); - const field_align = try sema.unionFieldAlignment(union_obj, field_index); + const field_align = try mod.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema); break :blk union_align.min(field_align); } else union_ptr_info.flags.alignment, }, @@ -28163,7 +28095,7 @@ fn unionFieldPtr( }, .@"packed", .@"extern" => {}, } - const field_ptr_val = try union_ptr_val.ptrField(field_index, sema); + const field_ptr_val = try union_ptr_val.ptrField(field_index, mod); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28198,7 +28130,7 @@ fn unionFieldVal( const ip = &zcu.intern_pool; assert(union_ty.zigTypeTag(zcu) == .Union); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(zcu); const union_obj = zcu.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); @@ -28237,7 +28169,7 @@ fn unionFieldVal( .@"packed" => if (tag_matches) { // Fast path - no need to use bitcast logic. return Air.internedToRef(un.val); - } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, sema), 0)) |field_val| { + } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, .sema), 0)) |field_val| { return Air.internedToRef(field_val.toIntern()); }, } @@ -28256,7 +28188,7 @@ fn unionFieldVal( _ = try block.addNoOp(.unreach); return .unreachable_value; } - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(zcu); return block.addStructFieldVal(union_byval, field_index, field_ty); } @@ -28287,7 +28219,7 @@ fn elemPtr( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); break :blk try sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -28325,11 +28257,11 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); - const elem_ptr = try ptr_val.ptrElem(index, sema); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); + const elem_ptr = try ptr_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr.toIntern()); }; - const result_ty = try sema.elemPtrType(indexable_ty, null); + const result_ty = try indexable_ty.elemPtrType(null, mod); try sema.requireRuntimeBlock(block, src, runtime_src); return block.addPtrElemPtr(indexable, elem_index, result_ty); @@ -28343,7 +28275,7 @@ fn elemPtrOneLayerOnly( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); 
+ const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); break :blk try sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false); }, else => unreachable, // Guaranteed by checkIndexable @@ -28383,12 +28315,12 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); const elem_ty = indexable_ty.elemType2(mod); const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); - const elem_ptr_val = try many_ptr_val.ptrElem(index, sema); + const elem_ptr_val = try many_ptr_val.ptrElem(index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern()); } @@ -28404,7 +28336,7 @@ fn elemVal( if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent; const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent; const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent; - const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(mod)); if (index != inner_ty.arrayLen(mod)) break :arr_sent; return Air.internedToRef(sentinel.toIntern()); } @@ -28422,7 +28354,7 @@ fn elemVal( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -28467,7 +28399,7 @@ fn tupleFieldPtr( const mod = sema.mod; const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(mod); - try sema.resolveTypeFields(tuple_ty); + try tuple_ty.resolveFields(mod); const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { @@ -28481,7 +28413,7 @@ fn tupleFieldPtr( } const field_ty = tuple_ty.structFieldType(field_index, mod); - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !tuple_ptr_ty.ptrIsMutable(mod), @@ -28491,7 +28423,7 @@ fn tupleFieldPtr( }); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return Air.internedToRef((try mod.intern(.{ .ptr = .{ @@ -28502,7 +28434,7 @@ fn tupleFieldPtr( } if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| { - const field_ptr_val = try tuple_ptr_val.ptrField(field_index, sema); + const field_ptr_val = try tuple_ptr_val.ptrField(field_index, mod); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28524,7 +28456,7 @@ fn tupleField( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const tuple_ty = sema.typeOf(tuple); - try sema.resolveTypeFields(tuple_ty); + try tuple_ty.resolveFields(mod); const field_count = tuple_ty.structFieldCount(mod); if 
(field_count == 0) { @@ -28540,7 +28472,7 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); // comptime field } @@ -28553,7 +28485,7 @@ fn tupleField( try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); try sema.requireRuntimeBlock(block, tuple_src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(tuple, field_index, field_ty); } @@ -28583,7 +28515,7 @@ fn elemValArray( const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); if (array_sent) |s| { if (index == array_len) { return Air.internedToRef(s.toIntern()); @@ -28599,7 +28531,7 @@ fn elemValArray( return mod.undefRef(elem_ty); } if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); const elem_val = try array_val.elemValue(mod, index); return Air.internedToRef(elem_val.toIntern()); } @@ -28621,7 +28553,6 @@ fn elemValArray( return Air.internedToRef(elem_val.toIntern()); try sema.requireRuntimeBlock(block, src, runtime_src); - try sema.queueFullTypeResolution(array_ty); return block.addBinOp(.array_elem_val, array, elem_index); } @@ -28650,7 +28581,7 @@ fn elemPtrArray( const maybe_undef_array_ptr_val = try sema.resolveValue(array_ptr); // The index must not be undefined since it can be out of bounds. 
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); @@ -28658,14 +28589,14 @@ fn elemPtrArray( break :o index; } else null; - const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset); + const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, mod); if (maybe_undef_array_ptr_val) |array_ptr_val| { if (array_ptr_val.isUndef(mod)) { return mod.undefRef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.ptrElem(index, sema); + const elem_ptr = try array_ptr_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr.toIntern()); } } @@ -28710,19 +28641,19 @@ fn elemValSlice( if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_ty = try sema.elemPtrType(slice_ty, index); - const elem_ptr_val = try slice_val.ptrElem(index, sema); + const elem_ptr_ty = try slice_ty.elemPtrType(index, mod); + const elem_ptr_val = try slice_val.ptrElem(index, mod); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef(elem_val.toIntern()); } @@ -28735,13 +28666,12 @@ fn elemValSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| - try mod.intRef(Type.usize, try slice_val.sliceLen(sema)) + try mod.intRef(Type.usize, try slice_val.sliceLen(mod)) else try block.addTyOp(.slice_len, Type.usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } - try sema.queueFullTypeResolution(sema.typeOf(slice)); return block.addBinOp(.slice_elem_val, slice, elem_index); } @@ -28762,17 +28692,17 @@ fn elemPtrSlice( const maybe_undef_slice_val = try sema.resolveValue(slice); // The index must not be undefined since it can be out of bounds. 
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); break :o index; } else null; - const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset); + const elem_ptr_ty = try slice_ty.elemPtrType(offset, mod); if (maybe_undef_slice_val) |slice_val| { if (slice_val.isUndef(mod)) { return mod.undefRef(elem_ptr_ty); } - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -28782,7 +28712,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.ptrElem(index, sema); + const elem_ptr_val = try slice_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr_val.toIntern()); } } @@ -28795,7 +28725,7 @@ fn elemPtrSlice( const len_inst = len: { if (maybe_undef_slice_val) |slice_val| if (!slice_val.isUndef(mod)) - break :len try mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + break :len try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -28860,9 +28790,9 @@ fn coerceExtra( if (dest_ty.isGenericPoison()) return inst; const zcu = sema.mod; const dest_ty_src = inst_src; // TODO better source location - try sema.resolveTypeFields(dest_ty); + try dest_ty.resolveFields(zcu); const inst_ty = sema.typeOf(inst); - try sema.resolveTypeFields(inst_ty); + try inst_ty.resolveFields(zcu); const target = zcu.getTarget(); // If the types are the same, we can return the operand. if (dest_ty.eql(inst_ty, zcu)) @@ -28876,7 +28806,6 @@ fn coerceExtra( return sema.coerceInMemory(val, dest_ty); } try sema.requireRuntimeBlock(block, inst_src, null); - try sema.queueFullTypeResolution(dest_ty); const new_val = try block.addBitCast(dest_ty, inst); try sema.checkKnownAllocPtr(block, inst, new_val); return new_val; @@ -29172,7 +29101,7 @@ fn coerceExtra( // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. 
if (inst_child_ty.structFieldCount(zcu) == 0) { - const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, sema); + const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, .sema); return Air.internedToRef(try zcu.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), .ptr = try zcu.intern(.{ .ptr = .{ @@ -29317,7 +29246,7 @@ fn coerceExtra( } break :int; }; - const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, sema); + const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, .sema); // TODO implement this compile error //const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, zcu)) { @@ -30649,7 +30578,6 @@ fn storePtr2( } try sema.requireRuntimeBlock(block, src, runtime_src); - try sema.queueFullTypeResolution(elem_ty); if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) { const ptr_inst = ptr.toIndex().?; @@ -30871,10 +30799,10 @@ fn bitCast( operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { const zcu = sema.mod; - try sema.resolveTypeLayout(dest_ty); + try dest_ty.resolveLayout(zcu); const old_ty = sema.typeOf(inst); - try sema.resolveTypeLayout(old_ty); + try old_ty.resolveLayout(zcu); const dest_bits = dest_ty.bitSize(zcu); const old_bits = old_ty.bitSize(zcu); @@ -31056,7 +30984,7 @@ fn coerceEnumToUnion( const union_obj = mod.typeToUnion(union_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try sema.resolveTypeFields(field_ty); + try field_ty.resolveFields(mod); if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{}); @@ -31469,8 +31397,8 @@ fn coerceTupleToStruct( ) !Air.Inst.Ref { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(struct_ty); - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveFields(mod); + try struct_ty.resolveStructFieldInits(mod); if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); @@ -31817,7 +31745,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl }); // TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type try sema.declareDependency(.{ .decl_val = decl_index }); - const ptr_ty = try sema.ptrType(.{ + const ptr_ty = try mod.ptrTypeSema(.{ .child = decl_val.typeOf(mod).toIntern(), .flags = .{ .alignment = owner_decl.alignment, @@ -31864,14 +31792,14 @@ fn analyzeRef( try sema.requireRuntimeBlock(block, src, null); const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try mod.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .is_const = true, .address_space = address_space, }, }); - const mut_ptr_type = try sema.ptrType(.{ + const mut_ptr_type = try mod.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .address_space = address_space }, }); @@ -31979,7 +31907,7 @@ fn analyzeSliceLen( if (slice_val.isUndef(mod)) { return mod.undefRef(Type.usize); } - return mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + return mod.intRef(Type.usize, try slice_val.sliceLen(mod)); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.slice_len, Type.usize, slice_inst); @@ -32347,7 +32275,7 @@ fn analyzeSlice( assert(manyptr_ty_key.flags.size == .One); manyptr_ty_key.child = elem_ty.toIntern(); manyptr_ty_key.flags.size = .Many; - break :ptr try 
sema.coerceCompatiblePtrs(block, try sema.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src); } else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); @@ -32416,7 +32344,7 @@ fn analyzeSlice( return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const len_plus_sent = slice_len + @intFromBool(has_sentinel); const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent); if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { @@ -32431,7 +32359,7 @@ fn analyzeSlice( "end index {} out of bounds for slice of length {d}{s}", .{ end_val.fmtValue(mod, sema), - try slice_val.sliceLen(sema), + try slice_val.sliceLen(mod), sentinel_label, }, ); @@ -32504,7 +32432,7 @@ fn analyzeSlice( const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty); - const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, sema); + const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, mod); const res = try sema.pointerDerefExtra(block, src, elem_ptr); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -32567,9 +32495,9 @@ fn analyzeSlice( const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = try new_len_val.toUnsignedIntAdvanced(sema); + const new_len_int = try new_len_val.toUnsignedIntSema(mod); - const return_ty = try sema.ptrType(.{ + const return_ty = try mod.ptrTypeSema(.{ .child = (try mod.arrayType(.{ .len = new_len_int, .sentinel = if (sentinel) |s| s.toIntern() else .none, @@ -32631,7 +32559,7 @@ fn analyzeSlice( return sema.fail(block, src, "non-zero length slice of undefined pointer", .{}); } - const return_ty = try sema.ptrType(.{ + const return_ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = if (sentinel) |s| s.toIntern() else .none, .flags = .{ @@ -32659,7 +32587,7 @@ fn analyzeSlice( if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the // underlying value data includes the sentinel - break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); @@ -32751,7 +32679,7 @@ fn cmpNumeric( if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false; } - return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) + return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, .sema)) .bool_true else .bool_false; @@ -32820,11 +32748,11 @@ fn cmpNumeric( // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, // add/subtract 1. 
const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| - !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) + !(try lhs_val.compareAllWithZeroSema(.gte, mod)) else (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| - !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema)) + !(try rhs_val.compareAllWithZeroSema(.gte, mod)) else (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; @@ -32972,7 +32900,7 @@ fn compareIntsOnlyPossibleResult( ) Allocator.Error!?bool { const mod = sema.mod; const rhs_info = rhs_ty.intInfo(mod); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable; + const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, .sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -33136,7 +33064,6 @@ fn wrapErrorUnionPayload( } }))); } try sema.requireRuntimeBlock(block, inst_src, null); - try sema.queueFullTypeResolution(dest_payload_ty); return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced); } @@ -33939,7 +33866,7 @@ fn resolvePeerTypesInner( opt_ptr_info = ptr_info; } - return .{ .success = try sema.ptrType(opt_ptr_info.?) }; + return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; }, .ptr => { @@ -34249,7 +34176,7 @@ fn resolvePeerTypesInner( }, } - return .{ .success = try sema.ptrType(opt_ptr_info.?) }; + return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; }, .func => { @@ -34606,7 +34533,7 @@ fn resolvePeerTypesInner( var comptime_val: ?Value = null; for (peer_tys) |opt_ty| { const struct_ty = opt_ty orelse continue; - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_index) orelse { comptime_val = null; @@ -34742,181 +34669,22 @@ pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { const ip = &mod.intern_pool; const fn_ty_info = mod.typeToFunc(fn_ty).?; - try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.return_type)); + try Type.fromInterned(fn_ty_info.return_type).resolveFully(mod); if (mod.comp.config.any_error_tracing and Type.fromInterned(fn_ty_info.return_type).isError(mod)) { // Ensure the type exists so that backends can assume that. - _ = try sema.getBuiltinType("StackTrace"); + _ = try mod.getBuiltinType("StackTrace"); } for (0..fn_ty_info.param_types.len) |i| { - try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.param_types.get(ip)[i])); + try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(mod); } } -/// Make it so that calling hash() and eql() on `val` will not assert due -/// to a type not having its layout resolved. 
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { - const mod = sema.mod; - switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| switch (int.storage) { - .u64, .i64, .big_int => return val, - .lazy_align, .lazy_size => return mod.intValue( - Type.fromInterned(int.ty), - (try val.getUnsignedIntAdvanced(mod, sema)).?, - ), - }, - .slice => |slice| { - const ptr = try sema.resolveLazyValue(Value.fromInterned(slice.ptr)); - const len = try sema.resolveLazyValue(Value.fromInterned(slice.len)); - if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val; - return Value.fromInterned(try mod.intern(.{ .slice = .{ - .ty = slice.ty, - .ptr = ptr.toIntern(), - .len = len.toIntern(), - } })); - }, - .ptr => |ptr| { - switch (ptr.base_addr) { - .decl, .comptime_alloc, .anon_decl, .int => return val, - .comptime_field => |field_val| { - const resolved_field_val = - (try sema.resolveLazyValue(Value.fromInterned(field_val))).toIntern(); - return if (resolved_field_val == field_val) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = .{ .comptime_field = resolved_field_val }, - .byte_offset = ptr.byte_offset, - } }))); - }, - .eu_payload, .opt_payload => |base| { - const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base))).toIntern(); - return if (resolved_base == base) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = switch (ptr.base_addr) { - .eu_payload => .{ .eu_payload = resolved_base }, - .opt_payload => .{ .opt_payload = resolved_base }, - else => unreachable, - }, - .byte_offset = ptr.byte_offset, - } }))); - }, - .arr_elem, .field => |base_index| { - const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base_index.base))).toIntern(); - return if (resolved_base == base_index.base) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = switch (ptr.base_addr) { - .arr_elem => .{ .arr_elem = .{ - .base = resolved_base, - .index = base_index.index, - } }, - .field => .{ .field = .{ - .base = resolved_base, - .index = base_index.index, - } }, - else => unreachable, - }, - .byte_offset = ptr.byte_offset, - } }))); - }, - } - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => return val, - .elems => |elems| { - var resolved_elems: []InternPool.Index = &.{}; - for (elems, 0..) 
|elem, i| { - const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern(); - if (resolved_elems.len == 0 and resolved_elem != elem) { - resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len); - @memcpy(resolved_elems[0..i], elems[0..i]); - } - if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; - } - return if (resolved_elems.len == 0) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = aggregate.ty, - .storage = .{ .elems = resolved_elems }, - } }))); - }, - .repeated_elem => |elem| { - const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern(); - return if (resolved_elem == elem) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = aggregate.ty, - .storage = .{ .repeated_elem = resolved_elem }, - } }))); - }, - }, - .un => |un| { - const resolved_tag = if (un.tag == .none) - .none - else - (try sema.resolveLazyValue(Value.fromInterned(un.tag))).toIntern(); - const resolved_val = (try sema.resolveLazyValue(Value.fromInterned(un.val))).toIntern(); - return if (resolved_tag == un.tag and resolved_val == un.val) - val - else - Value.fromInterned((try mod.intern(.{ .un = .{ - .ty = un.ty, - .tag = resolved_tag, - .val = resolved_val, - } }))); - }, - else => return val, - } -} - -pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .simple_type => |simple_type| return sema.resolveSimpleType(simple_type), - else => {}, - } - switch (ty.zigTypeTag(mod)) { - .Struct => return sema.resolveStructLayout(ty), - .Union => return sema.resolveUnionLayout(ty), - .Array => { - if (ty.arrayLenIncludingSentinel(mod) == 0) return; - const elem_ty = ty.childType(mod); - return sema.resolveTypeLayout(elem_ty); - }, - .Optional => { - const payload_ty = ty.optionalChild(mod); - // In case of querying the ABI alignment of this optional, we will ask - // for hasRuntimeBits() of the payload type, so we need "requires comptime" - // to be known already before this function returns. - _ = try sema.typeRequiresComptime(payload_ty); - return sema.resolveTypeLayout(payload_ty); - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - return sema.resolveTypeLayout(payload_ty); - }, - .Fn => { - const info = mod.typeToFunc(ty).?; - if (info.is_generic) { - // Resolving of generic function types is deferred to when - // the function is instantiated. - return; - } - const ip = &mod.intern_pool; - for (0..info.param_types.len) |i| { - const param_ty = info.param_types.get(ip)[i]; - try sema.resolveTypeLayout(Type.fromInterned(param_ty)); - } - try sema.resolveTypeLayout(Type.fromInterned(info.return_type)); - }, - else => {}, - } + return val.resolveLazy(sema.arena, sema.mod); } /// Resolve a struct's alignment only without triggering resolution of its layout. 
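The hunks above repeat one mechanical migration: Value and Type queries that used to be threaded through the running `*Sema` (`toUnsignedIntAdvanced(sema)`, `sliceLen(sema)`, `ptrElem(index, sema)`, `sema.resolveTypeFields(ty)`) now either take the module directly (`toUnsignedIntSema(mod)`, `ty.resolveFields(mod)`, `ty.resolveLayout(mod)`, `mod.ptrTypeSema(...)`) or a `.sema` strategy tag (`abiAlignmentAdvanced(zcu, .sema)`), and the hand-rolled lazy-value walk in `resolveLazyValue` collapses into a call to `val.resolveLazy(sema.arena, sema.mod)`. The snippet below is a minimal, self-contained sketch of that parameter shape only — a small strategy enum in place of a `*Sema` argument; `ResolveStrat` and `unsignedInt` are hypothetical names, not the compiler's API.

/// Hypothetical stand-in for a resolution-strategy tag; not the compiler's type.
const ResolveStrat = enum { normal, sema };

/// `null` here models a value that is still lazy (unresolved).
fn unsignedInt(val: ?u64, strat: ResolveStrat) error{Unresolved}!u64 {
    if (val) |v| return v;
    return switch (strat) {
        // During semantic analysis the callee is allowed to resolve the value itself.
        .sema => 0,
        // Outside analysis, an unresolved value is a caller bug.
        .normal => error.Unresolved,
    };
}

test "strategy tag replaces a *Sema parameter" {
    const std = @import("std");
    try std.testing.expectEqual(@as(u64, 7), try unsignedInt(7, .normal));
    try std.testing.expectEqual(@as(u64, 0), try unsignedInt(null, .sema));
    try std.testing.expectError(error.Unresolved, unsignedInt(null, .normal));
}

Passing a tag rather than a pointer presumably lets the same helpers run in contexts that have no `Sema` at hand, which is also why the `queueFullTypeResolution` bookkeeping removed further down is no longer needed.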
@@ -34925,11 +34693,13 @@ pub fn resolveStructAlignment( sema: *Sema, ty: InternPool.Index, struct_type: InternPool.LoadedStructType, -) CompileError!Alignment { +) SemaError!void { const mod = sema.mod; const ip = &mod.intern_pool; const target = mod.getTarget(); + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + assert(struct_type.flagsPtr(ip).alignment == .none); assert(struct_type.layout != .@"packed"); @@ -34940,7 +34710,7 @@ pub fn resolveStructAlignment( struct_type.flagsPtr(ip).assumed_pointer_aligned = true; const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); struct_type.flagsPtr(ip).alignment = result; - return result; + return; } try sema.resolveTypeFieldsStruct(ty, struct_type); @@ -34952,7 +34722,7 @@ pub fn resolveStructAlignment( struct_type.flagsPtr(ip).assumed_pointer_aligned = true; const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); struct_type.flagsPtr(ip).alignment = result; - return result; + return; } defer struct_type.clearAlignmentWip(ip); @@ -34962,30 +34732,35 @@ pub fn resolveStructAlignment( const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) continue; - const field_align = try sema.structFieldAlignment( + const field_align = try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, + .sema, ); result = result.maxStrict(field_align); } struct_type.flagsPtr(ip).alignment = result; - return result; } -fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + if (struct_type.haveLayout(ip)) return; - try sema.resolveTypeFields(ty); + try ty.resolveFields(zcu); if (struct_type.layout == .@"packed") { - try semaBackingIntType(zcu, struct_type); + semaBackingIntType(zcu, struct_type) catch |err| switch (err) { + error.OutOfMemory, error.AnalysisFail => |e| return e, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, + }; return; } @@ -35021,10 +34796,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { }, else => return err, }; - field_align.* = try sema.structFieldAlignment( + field_align.* = try zcu.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, + .sema, ); big_align = big_align.maxStrict(field_align.*); } @@ -35160,7 +34936,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co var accumulator: u64 = 0; for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - accumulator += try field_ty.bitSizeAdvanced(mod, &sema); + accumulator += try field_ty.bitSizeAdvanced(mod, .sema); } break :blk accumulator; }; @@ -35270,11 +35046,13 @@ pub fn resolveUnionAlignment( sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType, -) CompileError!Alignment { +) SemaError!void { const mod = sema.mod; const ip = &mod.intern_pool; const target = mod.getTarget(); + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + assert(!union_type.haveLayout(ip)); if (union_type.flagsPtr(ip).status == .field_types_wip) { @@ -35284,7 +35062,7 @@ pub fn resolveUnionAlignment( union_type.flagsPtr(ip).assumed_pointer_aligned = true; const 
result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); union_type.flagsPtr(ip).alignment = result; - return result; + return; } try sema.resolveTypeFieldsUnion(ty, union_type); @@ -35304,11 +35082,10 @@ pub fn resolveUnionAlignment( } union_type.flagsPtr(ip).alignment = max_align; - return max_align; } /// This logic must be kept in sync with `Module.getUnionLayout`. -fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; @@ -35317,6 +35094,8 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { // Load again, since the tag type might have changed due to resolution. const union_type = ip.loadUnionType(ty.ip_index); + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + switch (union_type.flagsPtr(ip).status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { @@ -35425,53 +35204,15 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { /// Returns `error.AnalysisFail` if any of the types (recursively) failed to /// be resolved. -pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; - switch (ty.zigTypeTag(mod)) { - .Pointer => { - return sema.resolveTypeFully(ty.childType(mod)); - }, - .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .struct_type => try sema.resolveStructFully(ty), - .anon_struct_type => |tuple| { - for (tuple.types.get(ip)) |field_ty| { - try sema.resolveTypeFully(Type.fromInterned(field_ty)); - } - }, - .simple_type => |simple_type| try sema.resolveSimpleType(simple_type), - else => {}, - }, - .Union => return sema.resolveUnionFully(ty), - .Array => return sema.resolveTypeFully(ty.childType(mod)), - .Optional => { - return sema.resolveTypeFully(ty.optionalChild(mod)); - }, - .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)), - .Fn => { - const info = mod.typeToFunc(ty).?; - if (info.is_generic) { - // Resolving of generic function types is deferred to when - // the function is instantiated. 
- return; - } - for (0..info.param_types.len) |i| { - const param_ty = info.param_types.get(ip)[i]; - try sema.resolveTypeFully(Type.fromInterned(param_ty)); - } - try sema.resolveTypeFully(Type.fromInterned(info.return_type)); - }, - else => {}, - } -} - -fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveStructLayout(ty); const mod = sema.mod; const ip = &mod.intern_pool; const struct_type = mod.typeToStruct(ty).?; + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + if (struct_type.setFullyResolved(ip)) return; errdefer struct_type.clearFullyResolved(ip); @@ -35481,16 +35222,19 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - try sema.resolveTypeFully(field_ty); + try field_ty.resolveFully(mod); } } -fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveUnionLayout(ty); const mod = sema.mod; const ip = &mod.intern_pool; const union_obj = mod.typeToUnion(ty).?; + + assert(sema.ownerUnit().unwrap().decl == union_obj.decl); + switch (union_obj.flagsPtr(ip).status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, .fully_resolved_wip, .fully_resolved => return, @@ -35506,7 +35250,7 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { union_obj.flagsPtr(ip).status = .fully_resolved_wip; for (0..union_obj.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try sema.resolveTypeFully(field_ty); + try field_ty.resolveFully(mod); } union_obj.flagsPtr(ip).status = .fully_resolved; } @@ -35515,135 +35259,18 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { _ = try sema.typeRequiresComptime(ty); } -pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; - const ty_ip = ty.toIntern(); - - switch (ty_ip) { - .none => unreachable, - - .u0_type, - .i0_type, - .u1_type, - .u8_type, - .i8_type, - .u16_type, - .i16_type, - .u29_type, - .u32_type, - .i32_type, - .u64_type, - .i64_type, - .u80_type, - .u128_type, - .i128_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - .c_longdouble_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .anyopaque_type, - .bool_type, - .void_type, - .type_type, - .anyerror_type, - .adhoc_inferred_error_set_type, - .comptime_int_type, - .comptime_float_type, - .noreturn_type, - .anyframe_type, - .null_type, - .undefined_type, - .enum_literal_type, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, - .single_const_pointer_to_comptime_int_type, - .slice_const_u8_type, - .slice_const_u8_sentinel_0_type, - .optional_noreturn_type, - .anyerror_void_error_union_type, - .generic_poison_type, - .empty_struct_type, - => {}, - - .undef => unreachable, - .zero => unreachable, - .zero_usize => unreachable, - .zero_u8 => unreachable, - .one => unreachable, - .one_usize => unreachable, - .one_u8 => unreachable, - .four_u8 => unreachable, - .negative_one => unreachable, - .calling_convention_c => unreachable, - .calling_convention_inline => unreachable, - .void_value => 
unreachable, - .unreachable_value => unreachable, - .null_value => unreachable, - .bool_true => unreachable, - .bool_false => unreachable, - .empty_struct => unreachable, - .generic_poison => unreachable, - - else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { - .type_struct, - .type_struct_packed, - .type_struct_packed_inits, - => try sema.resolveTypeFieldsStruct(ty_ip, ip.loadStructType(ty_ip)), - - .type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.loadUnionType(ty_ip)), - .simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type), - else => {}, - }, - } -} - -/// Fully resolves a simple type. This is usually a nop, but for builtin types with -/// special InternPool indices (such as std.builtin.Type) it will analyze and fully -/// resolve the container type. -fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileError!void { - const builtin_type_name: []const u8 = switch (simple_type) { - .atomic_order => "AtomicOrder", - .atomic_rmw_op => "AtomicRmwOp", - .calling_convention => "CallingConvention", - .address_space => "AddressSpace", - .float_mode => "FloatMode", - .reduce_op => "ReduceOp", - .call_modifier => "CallModifer", - .prefetch_options => "PrefetchOptions", - .export_options => "ExportOptions", - .extern_options => "ExternOptions", - .type_info => "Type", - else => return, - }; - // This will fully resolve the type. - _ = try sema.getBuiltinType(builtin_type_name); -} - pub fn resolveTypeFieldsStruct( sema: *Sema, ty: InternPool.Index, struct_type: InternPool.LoadedStructType, -) CompileError!void { +) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; // If there is no owner decl it means the struct has no fields. const owner_decl = struct_type.decl.unwrap() orelse return; + assert(sema.ownerUnit().unwrap().decl == owner_decl); + switch (zcu.declPtr(owner_decl).analysis) { .file_failure, .dependency_failure, @@ -35674,16 +35301,19 @@ pub fn resolveTypeFieldsStruct( } return error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; } -pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; const owner_decl = struct_type.decl.unwrap() orelse return; + assert(sema.ownerUnit().unwrap().decl == owner_decl); + // Inits can start as resolved if (struct_type.haveFieldInits(ip)) return; @@ -35706,15 +35336,19 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void { } return error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; struct_type.setHaveFieldInits(ip); } -pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) CompileError!void { +pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const owner_decl = zcu.declPtr(union_type.decl); + + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + switch (owner_decl.analysis) { .file_failure, .dependency_failure, @@ -35752,7 +35386,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load } return 
error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; union_type.flagsPtr(ip).status = .have_field_types; } @@ -36801,106 +36436,6 @@ fn generateUnionTagTypeSimple( return enum_ty; } -fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { - const zcu = sema.mod; - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = sema.owner_decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: { - assert(sema.owner_decl.has_tv); - assert(sema.owner_decl.owns_tv); - switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) { - .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?, - .Fn => { - const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl); - break :owner generic_owner_decl.zir_decl_index.unwrap().?; - }, - else => unreachable, - } - }, - .type_name_ctx = sema.owner_decl.name, - }; - defer block.instructions.deinit(sema.gpa); - - const src = block.nodeOffset(0); - - const decl_index = try getBuiltinDecl(sema, &block, name); - return sema.analyzeDeclVal(&block, src, decl_index); -} - -fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!InternPool.DeclIndex { - const gpa = sema.gpa; - - const src = block.nodeOffset(0); - - const mod = sema.mod; - const ip = &mod.intern_pool; - const std_mod = mod.std_mod; - const std_file = (mod.importPkg(std_mod) catch unreachable).file; - const opt_builtin_inst = (try sema.namespaceLookupRef( - block, - src, - mod.declPtr(std_file.root_decl.unwrap().?).src_namespace.toOptional(), - try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls), - )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); - const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst, src); - const builtin_ty = sema.analyzeAsType(block, src, builtin_inst) catch |err| switch (err) { - error.AnalysisFail => std.debug.panic("std.builtin is corrupt", .{}), - else => |e| return e, - }; - const decl_index = (try sema.namespaceLookup( - block, - src, - builtin_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, name, .no_embedded_nulls), - )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); - return decl_index; -} - -fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { - const zcu = sema.mod; - const ty_inst = try sema.getBuiltin(name); - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = sema.owner_decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: { - assert(sema.owner_decl.has_tv); - assert(sema.owner_decl.owns_tv); - switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) { - .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?, - .Fn => { - const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl); - break :owner generic_owner_decl.zir_decl_index.unwrap().?; - }, - else => unreachable, - } - }, - .type_name_ctx = sema.owner_decl.name, - }; - defer block.instructions.deinit(sema.gpa); - - const src = block.nodeOffset(0); - - const result_ty = sema.analyzeAsType(&block, src, ty_inst) catch |err| 
switch (err) { - error.AnalysisFail => std.debug.panic("std.builtin.{s} is corrupt", .{name}), - else => |e| return e, - }; - try sema.resolveTypeFully(result_ty); // Should not fail - return result_ty; -} - /// There is another implementation of this in `Type.onePossibleValue`. This one /// in `Sema` is for calling during semantic analysis, and performs field resolution /// to get the answer. The one in `Type` is for calling during codegen and asserts @@ -37104,8 +36639,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .struct_type => { + // Resolving the layout first helps to avoid loops. + // If the type has a coherent layout, we can recurse through fields safely. + try ty.resolveLayout(zcu); + const struct_type = ip.loadStructType(ty.toIntern()); - try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); if (struct_type.field_types.len == 0) { // In this case the struct has no fields at all and @@ -37122,20 +36660,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { ); for (field_vals, 0..) |*field_val, i| { if (struct_type.fieldIsComptime(ip, i)) { - try sema.resolveStructFieldInits(ty); + try ty.resolveStructFieldInits(zcu); field_val.* = struct_type.field_inits.get(ip)[i]; continue; } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (field_ty.eql(ty, zcu)) { - const msg = try sema.errMsg( - ty.srcLoc(zcu), - "struct '{}' depends on itself", - .{ty.fmt(zcu)}, - ); - try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| { field_val.* = field_opv.toIntern(); } else return null; @@ -37163,8 +36692,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .union_type => { + // Resolving the layout first helps to avoid loops. + // If the type has a coherent layout, we can recurse through fields safely. + try ty.resolveLayout(zcu); + const union_obj = ip.loadUnionType(ty.toIntern()); - try sema.resolveTypeFieldsUnion(ty, union_obj); const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse return null; if (union_obj.field_types.len == 0) { @@ -37172,15 +36704,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return Value.fromInterned(only); } const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); - if (only_field_ty.eql(ty, zcu)) { - const msg = try sema.errMsg( - ty.srcLoc(zcu), - "union '{}' depends on itself", - .{ty.fmt(zcu)}, - ); - try sema.addFieldErrNote(ty, 0, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse return null; const only = try zcu.intern(.{ .un = .{ @@ -37298,7 +36821,7 @@ fn analyzeComptimeAlloc( // Needed to make an anon decl with type `var_type` (the `finish()` call below). _ = try sema.typeHasOnePossibleValue(var_type); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try mod.ptrTypeSema(.{ .child = var_type.toIntern(), .flags = .{ .alignment = alignment, @@ -37485,64 +37008,28 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { /// `generic_poison` will return false. /// May return false negatives when structs and unions are having their field types resolved. 
-pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { - return ty.comptimeOnlyAdvanced(sema.mod, sema); +pub fn typeRequiresComptime(sema: *Sema, ty: Type) SemaError!bool { + return ty.comptimeOnlyAdvanced(sema.mod, .sema); } -pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - const mod = sema.mod; - return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) { +pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) SemaError!bool { + return ty.hasRuntimeBitsAdvanced(sema.mod, false, .sema) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; } -pub fn typeAbiSize(sema: *Sema, ty: Type) !u64 { - try sema.resolveTypeLayout(ty); +pub fn typeAbiSize(sema: *Sema, ty: Type) SemaError!u64 { + try ty.resolveLayout(sema.mod); return ty.abiSize(sema.mod); } -pub fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment { - return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar; -} - -/// Not valid to call for packed unions. -/// Keep implementation in sync with `Module.unionFieldNormalAlignment`. -pub fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index: u32) !Alignment { - const mod = sema.mod; - const ip = &mod.intern_pool; - const field_align = u.fieldAlign(ip, field_index); - if (field_align != .none) return field_align; - const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]); - if (field_ty.isNoReturn(sema.mod)) return .none; - return sema.typeAbiAlignment(field_ty); -} - -/// Keep implementation in sync with `Module.structFieldAlignment`. -pub fn structFieldAlignment( - sema: *Sema, - explicit_alignment: InternPool.Alignment, - field_ty: Type, - layout: std.builtin.Type.ContainerLayout, -) !Alignment { - if (explicit_alignment != .none) - return explicit_alignment; - const mod = sema.mod; - switch (layout) { - .@"packed" => return .none, - .auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty), - .@"extern" => {}, - } - // extern - const ty_abi_align = try sema.typeAbiAlignment(field_ty); - if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) { - return ty_abi_align.maxStrict(.@"16"); - } - return ty_abi_align; +pub fn typeAbiAlignment(sema: *Sema, ty: Type) SemaError!Alignment { + return (try ty.abiAlignmentAdvanced(sema.mod, .sema)).scalar; } pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - return ty.fnHasRuntimeBitsAdvanced(sema.mod, sema); + return ty.fnHasRuntimeBitsAdvanced(sema.mod, .sema); } fn unionFieldIndex( @@ -37554,7 +37041,7 @@ fn unionFieldIndex( ) !u32 { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_obj = mod.typeToUnion(union_ty).?; const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name); @@ -37570,7 +37057,7 @@ fn structFieldIndex( ) !u32 { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { @@ -37601,10 +37088,6 @@ fn anonStructFieldIndex( }); } -fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { - try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {}); -} - /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting 
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar). fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { @@ -37662,8 +37145,8 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -37752,8 +37235,8 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -37836,8 +37319,8 @@ fn intSubWithOverflowScalar( var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -38024,7 +37507,7 @@ fn intFitsInType( fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { const mod = sema.mod; - if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false; + if (!(try int_val.compareAllWithZeroSema(.gte, mod))) return false; const end_val = try mod.intValue(tag_ty, end); if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false; return true; @@ -38094,8 +37577,8 @@ fn intAddWithOverflowScalar( var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -38149,7 +37632,7 @@ fn compareScalar( switch (op) { .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), - else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, sema), + else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, .sema), } } @@ -38185,80 +37668,6 @@ fn compareVector( } }))); } -/// Returns the type of a pointer to an element. -/// Asserts that the type is a pointer, and that the element type is indexable. -/// If the element index is comptime-known, it must be passed in `offset`. 
-/// For *@Vector(n, T), return *align(a:b:h:v) T -/// For *[N]T, return *T -/// For [*]T, returns *T -/// For []T, returns *T -/// Handles const-ness and address spaces in particular. -/// This code is duplicated in `analyzePtrArithmetic`. -pub fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { - const mod = sema.mod; - const ptr_info = ptr_ty.ptrInfo(mod); - const elem_ty = ptr_ty.elemType2(mod); - const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0; - const parent_ty = ptr_ty.childType(mod); - - const VI = InternPool.Key.PtrType.VectorIndex; - - const vector_info: struct { - host_size: u16 = 0, - alignment: Alignment = .none, - vector_index: VI = .none, - } = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: { - const elem_bits = elem_ty.bitSize(mod); - if (elem_bits == 0) break :blk .{}; - const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); - if (!is_packed) break :blk .{}; - - break :blk .{ - .host_size = @intCast(parent_ty.arrayLen(mod)), - .alignment = parent_ty.abiAlignment(mod), - .vector_index = if (offset) |some| @enumFromInt(some) else .runtime, - }; - } else .{}; - - const alignment: Alignment = a: { - // Calculate the new pointer alignment. - if (ptr_info.flags.alignment == .none) { - // In case of an ABI-aligned pointer, any pointer arithmetic - // maintains the same ABI-alignedness. - break :a vector_info.alignment; - } - // If the addend is not a comptime-known value we can still count on - // it being a multiple of the type size. - const elem_size = try sema.typeAbiSize(elem_ty); - const addend = if (offset) |off| elem_size * off else elem_size; - - // The resulting pointer is aligned to the lcd between the offset (an - // arbitrary number) and the alignment factor (always a power of two, - // non zero). - const new_align: Alignment = @enumFromInt(@min( - @ctz(addend), - ptr_info.flags.alignment.toLog2Units(), - )); - assert(new_align != .none); - break :a new_align; - }; - return sema.ptrType(.{ - .child = elem_ty.toIntern(), - .flags = .{ - .alignment = alignment, - .is_const = ptr_info.flags.is_const, - .is_volatile = ptr_info.flags.is_volatile, - .is_allowzero = is_allowzero, - .address_space = ptr_info.flags.address_space, - .vector_index = vector_info.vector_index, - }, - .packed_offset = .{ - .host_size = vector_info.host_size, - .bit_offset = 0, - }, - }); -} - /// Merge lhs with rhs. /// Asserts that lhs and rhs are both error sets and are resolved. 
 fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
@@ -38299,13 +37708,6 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool
     return sema.typeOf(ref).zigTypeTag(sema.mod) == tag;
 }
 
-pub fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
-    if (info.flags.alignment != .none) {
-        _ = try sema.typeAbiAlignment(Type.fromInterned(info.child));
-    }
-    return sema.mod.ptrType(info);
-}
-
 pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
     if (!sema.mod.comp.debug_incremental) return;
 
@@ -38425,12 +37827,12 @@ fn maybeDerefSliceAsArray(
         else => unreachable,
     };
     const elem_ty = Type.fromInterned(slice.ty).childType(zcu);
-    const len = try Value.fromInterned(slice.len).toUnsignedIntAdvanced(sema);
+    const len = try Value.fromInterned(slice.len).toUnsignedIntSema(zcu);
    const array_ty = try zcu.arrayType(.{
        .child = elem_ty.toIntern(),
        .len = len,
    });
-    const ptr_ty = try sema.ptrType(p: {
+    const ptr_ty = try zcu.ptrTypeSema(p: {
        var p = Type.fromInterned(slice.ty).ptrInfo(zcu);
        p.flags.size = .One;
        p.child = array_ty.toIntern();
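Several of the resolution entry points above (`resolveStructLayout`, `resolveStructAlignment`, `resolveTypeFieldsStruct`, `resolveStructFieldInits`, `resolveTypeFieldsUnion`) narrow their error set from `CompileError` to `SemaError`, so errors that cannot escape them (`error.ComptimeBreak`, `error.ComptimeReturn`, `error.GenericPoison`) are now handled at the `catch |err| switch (err)` sites instead of being forwarded with `else => |e| return e`. A minimal sketch of that narrowing idiom, with simplified stand-in error sets and function bodies (not the compiler's definitions):

// Hypothetical, simplified error sets mirroring the shape of the change above.
const CompileError = error{ OutOfMemory, AnalysisFail, ComptimeBreak, ComptimeReturn, GenericPoison };
const SemaError = error{ OutOfMemory, AnalysisFail };

// Stand-in for a body analysis step that may signal comptime control flow.
fn analyzeBody() CompileError!void {
    return;
}

// The entry point narrows the error set: comptime control flow can never
// escape type resolution, so those errors are asserted unreachable here.
fn resolveLayout() SemaError!void {
    analyzeBody() catch |err| switch (err) {
        error.OutOfMemory, error.AnalysisFail => |e| return e,
        error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
    };
}

test "comptime control flow cannot escape the narrowed entry point" {
    try resolveLayout();
}

The exhaustive switch is what makes the tighter `SemaError` signature possible without changing observable behaviour at the call sites shown in the hunks.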
