From 7e552dc1e9a8388f71cc32083deb9dd848e79808 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 29 Jun 2024 01:36:25 +0100 Subject: Zcu: rework exports This commit reworks our representation of exported Decls and values in Zcu to be memory-optimized and trivially serialized. All exports are now stored in the `all_exports` array on `Zcu`. An `AnalUnit` which performs an export (either through an `export` annotation or by containing an analyzed `@export`) gains an entry into `single_exports` if it performs only one export, or `multi_exports` if it performs multiple. We no longer store a persistent mapping from a `Decl`/value to all exports of that entity; this state is not necessary for the majority of the pipeline. Instead, we construct it in `Zcu.processExports`, just before flush. This does not affect the algorithmic complexity of `processExports`, since this function already iterates all exports in the `Zcu`. The elimination of `decl_exports` and `value_exports` led to a few non-trivial backend changes. The LLVM backend has been wrangled into a more reasonable state in general regarding exports and externs. The C backend is currently disabled in this commit, because its support for `export` was quite broken, and that was exposed by this work -- I'm hoping @jacobly0 will be able to pick this up! --- src/codegen/c.zig | 2 + src/codegen/llvm.zig | 263 +++++++++++++++++++++------------------------------ 2 files changed, 111 insertions(+), 154 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 94f8faa441..a8e58a1055 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -3081,6 +3081,8 @@ pub fn genDeclValue( } pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { + if (true) @panic("TODO jacobly"); + const tracy = trace(@src()); defer tracy.end(); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 00cfd4404a..dd6606ece7 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -848,10 +848,6 @@ pub const Object = struct { /// Note that the values are not added until `emit`, when all errors in /// the compilation are known. error_name_table: Builder.Variable.Index, - /// This map is usually very close to empty. It tracks only the cases when a - /// second extern Decl could not be emitted with the correct name due to a - /// name collision. - extern_collisions: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, void), /// Memoizes a null `?usize` value. null_opt_usize: Builder.Constant, @@ -1011,7 +1007,6 @@ pub const Object = struct { .named_enum_map = .{}, .type_map = .{}, .error_name_table = .none, - .extern_collisions = .{}, .null_opt_usize = .no_init, .struct_field_map = .{}, }; @@ -1029,7 +1024,6 @@ pub const Object = struct { self.anon_decl_map.deinit(gpa); self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); - self.extern_collisions.deinit(gpa); self.builder.deinit(); self.struct_field_map.deinit(gpa); self.* = undefined; @@ -1121,61 +1115,6 @@ pub const Object = struct { try object.builder.finishModuleAsm(); } - fn resolveExportExternCollisions(object: *Object) !void { - const mod = object.module; - - // This map has externs with incorrect symbol names. - for (object.extern_collisions.keys()) |decl_index| { - const global = object.decl_map.get(decl_index) orelse continue; - // Same logic as below but for externs instead of exports. 
- const decl_name = object.builder.strtabStringIfExists(mod.declPtr(decl_index).name.toSlice(&mod.intern_pool)) orelse continue; - const other_global = object.builder.getGlobal(decl_name) orelse continue; - if (other_global.toConst().getBase(&object.builder) == - global.toConst().getBase(&object.builder)) continue; - - try global.replace(other_global, &object.builder); - } - object.extern_collisions.clearRetainingCapacity(); - - for (mod.decl_exports.keys(), mod.decl_exports.values()) |decl_index, export_list| { - const global = object.decl_map.get(decl_index) orelse continue; - try resolveGlobalCollisions(object, global, export_list.items); - } - - for (mod.value_exports.keys(), mod.value_exports.values()) |val, export_list| { - const global = object.anon_decl_map.get(val) orelse continue; - try resolveGlobalCollisions(object, global, export_list.items); - } - } - - fn resolveGlobalCollisions( - object: *Object, - global: Builder.Global.Index, - export_list: []const *Module.Export, - ) !void { - const mod = object.module; - const global_base = global.toConst().getBase(&object.builder); - for (export_list) |exp| { - // Detect if the LLVM global has already been created as an extern. In such - // case, we need to replace all uses of it with this exported global. - const exp_name = object.builder.strtabStringIfExists(exp.opts.name.toSlice(&mod.intern_pool)) orelse continue; - - const other_global = object.builder.getGlobal(exp_name) orelse continue; - if (other_global.toConst().getBase(&object.builder) == global_base) continue; - - try global.takeName(other_global, &object.builder); - try other_global.replace(global, &object.builder); - // Problem: now we need to replace in the decl_map that - // the extern decl index points to this new global. However we don't - // know the decl index. - // Even if we did, a future incremental update to the extern would then - // treat the LLVM global as an extern rather than an export, so it would - // need a way to check that. - // This is a TODO that needs to be solved when making - // the LLVM backend support incremental compilation. 
- } - } - pub const EmitOptions = struct { pre_ir_path: ?[]const u8, pre_bc_path: ?[]const u8, @@ -1193,7 +1132,6 @@ pub const Object = struct { pub fn emit(self: *Object, options: EmitOptions) !void { { - try self.resolveExportExternCollisions(); try self.genErrorNameTable(); try self.genCmpLtErrorsLenFunction(); try self.genModuleLevelAssembly(); @@ -1698,8 +1636,7 @@ pub const Object = struct { const file = try o.getDebugFile(namespace.file_scope); const line_number = decl.navSrcLine(zcu) + 1; - const is_internal_linkage = decl.val.getExternFunc(zcu) == null and - !zcu.decl_exports.contains(decl_index); + const is_internal_linkage = decl.val.getExternFunc(zcu) == null; const debug_decl_type = try o.lowerDebugType(decl.typeOf(zcu)); const subprogram = try o.builder.debugSubprogram( @@ -1760,8 +1697,6 @@ pub const Object = struct { }; try fg.wip.finish(); - - try o.updateExports(zcu, .{ .decl_index = decl_index }, zcu.getDeclExports(decl_index)); } pub fn updateDecl(self: *Object, module: *Module, decl_index: InternPool.DeclIndex) !void { @@ -1781,66 +1716,25 @@ pub const Object = struct { }, else => |e| return e, }; - try self.updateExports(module, .{ .decl_index = decl_index }, module.getDeclExports(decl_index)); } pub fn updateExports( self: *Object, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { const decl_index = switch (exported) { .decl_index => |i| i, - .value => |val| return updateExportedValue(self, mod, val, exports), + .value => |val| return updateExportedValue(self, mod, val, export_indices), }; - const gpa = mod.gpa; const ip = &mod.intern_pool; - // If the module does not already have the function, we ignore this function call - // because we call `updateExports` at the end of `updateFunc` and `updateDecl`. 
- const global_index = self.decl_map.get(decl_index) orelse return; + const global_index = self.decl_map.get(decl_index).?; const decl = mod.declPtr(decl_index); const comp = mod.comp; - if (decl.isExtern(mod)) { - const decl_name = decl_name: { - if (mod.getTarget().isWasm() and decl.val.typeOf(mod).zigTypeTag(mod) == .Fn) { - if (decl.getOwnedExternFunc(mod).?.lib_name.toSlice(ip)) |lib_name| { - if (!std.mem.eql(u8, lib_name, "c")) { - break :decl_name try self.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name }); - } - } - } - break :decl_name try self.builder.strtabString(decl.name.toSlice(ip)); - }; - if (self.builder.getGlobal(decl_name)) |other_global| { - if (other_global != global_index) { - try self.extern_collisions.put(gpa, decl_index, {}); - } - } - - try global_index.rename(decl_name, &self.builder); - global_index.setLinkage(.external, &self.builder); - global_index.setUnnamedAddr(.default, &self.builder); - if (comp.config.dll_export_fns) - global_index.setDllStorageClass(.default, &self.builder); - - if (decl.val.getVariable(mod)) |decl_var| { - global_index.ptrConst(&self.builder).kind.variable.setThreadLocal( - if (decl_var.is_threadlocal) .generaldynamic else .default, - &self.builder, - ); - if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &self.builder); - } - } else if (exports.len != 0) { - const main_exp_name = try self.builder.strtabString(exports[0].opts.name.toSlice(ip)); - try global_index.rename(main_exp_name, &self.builder); - - if (decl.val.getVariable(mod)) |decl_var| if (decl_var.is_threadlocal) - global_index.ptrConst(&self.builder).kind - .variable.setThreadLocal(.generaldynamic, &self.builder); - - return updateExportedGlobal(self, mod, global_index, exports); + if (export_indices.len != 0) { + return updateExportedGlobal(self, mod, global_index, export_indices); } else { const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(mod)).toSlice(ip)); try global_index.rename(fqn, &self.builder); @@ -1848,17 +1742,6 @@ pub const Object = struct { if (comp.config.dll_export_fns) global_index.setDllStorageClass(.default, &self.builder); global_index.setUnnamedAddr(.unnamed_addr, &self.builder); - if (decl.val.getVariable(mod)) |decl_var| { - const decl_namespace = mod.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.file_scope.mod.single_threaded; - global_index.ptrConst(&self.builder).kind.variable.setThreadLocal( - if (decl_var.is_threadlocal and !single_threaded) - .generaldynamic - else - .default, - &self.builder, - ); - } } } @@ -1866,11 +1749,11 @@ pub const Object = struct { o: *Object, mod: *Module, exported_value: InternPool.Index, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { const gpa = mod.gpa; const ip = &mod.intern_pool; - const main_exp_name = try o.builder.strtabString(exports[0].opts.name.toSlice(ip)); + const main_exp_name = try o.builder.strtabString(mod.all_exports.items[export_indices[0]].opts.name.toSlice(ip)); const global_index = i: { const gop = try o.anon_decl_map.getOrPut(gpa, exported_value); if (gop.found_existing) { @@ -1894,32 +1777,57 @@ pub const Object = struct { try variable_index.setInitializer(init_val, &o.builder); break :i global_index; }; - return updateExportedGlobal(o, mod, global_index, exports); + return updateExportedGlobal(o, mod, global_index, export_indices); } fn updateExportedGlobal( o: *Object, mod: *Module, global_index: Builder.Global.Index, - exports: []const *Module.Export, 
+ export_indices: []const u32, ) link.File.UpdateExportsError!void { const comp = mod.comp; const ip = &mod.intern_pool; + const first_export = mod.all_exports.items[export_indices[0]]; + + // We will rename this global to have a name matching `first_export`. + // Successive exports become aliases. + // If the first export name already exists, then there is a corresponding + // extern global - we replace it with this global. + const first_exp_name = try o.builder.strtabString(first_export.opts.name.toSlice(ip)); + if (o.builder.getGlobal(first_exp_name)) |other_global| replace: { + if (other_global.toConst().getBase(&o.builder) == global_index.toConst().getBase(&o.builder)) { + break :replace; // this global already has the name we want + } + try global_index.takeName(other_global, &o.builder); + try other_global.replace(global_index, &o.builder); + // Problem: now we need to replace in the decl_map that + // the extern decl index points to this new global. However we don't + // know the decl index. + // Even if we did, a future incremental update to the extern would then + // treat the LLVM global as an extern rather than an export, so it would + // need a way to check that. + // This is a TODO that needs to be solved when making + // the LLVM backend support incremental compilation. + } else { + try global_index.rename(first_exp_name, &o.builder); + } + global_index.setUnnamedAddr(.default, &o.builder); if (comp.config.dll_export_fns) global_index.setDllStorageClass(.dllexport, &o.builder); - global_index.setLinkage(switch (exports[0].opts.linkage) { + global_index.setLinkage(switch (first_export.opts.linkage) { .internal => unreachable, .strong => .external, .weak => .weak_odr, .link_once => .linkonce_odr, }, &o.builder); - global_index.setVisibility(switch (exports[0].opts.visibility) { + global_index.setVisibility(switch (first_export.opts.visibility) { .default => .default, .hidden => .hidden, .protected => .protected, }, &o.builder); - if (exports[0].opts.section.toSlice(ip)) |section| + if (first_export.opts.section.toSlice(ip)) |section| switch (global_index.ptrConst(&o.builder).kind) { .variable => |impl_index| impl_index.setSection( try o.builder.string(section), @@ -1936,7 +1844,8 @@ pub const Object = struct { // The planned solution to this is https://github.com/ziglang/zig/issues/13265 // Until then we iterate over existing aliases and make them point // to the correct decl, or otherwise add a new alias. Old aliases are leaked. - for (exports[1..]) |exp| { + for (export_indices[1..]) |export_idx| { + const exp = mod.all_exports.items[export_idx]; const exp_name = try o.builder.strtabString(exp.opts.name.toSlice(ip)); if (o.builder.getGlobal(exp_name)) |global| { switch (global.ptrConst(&o.builder).kind) { @@ -1944,7 +1853,13 @@ pub const Object = struct { alias.setAliasee(global_index.toConst(), &o.builder); continue; }, - .variable, .function => {}, + .variable, .function => { + // This existing global is an `extern` corresponding to this export. + // Replace it with the global being exported. + // This existing global must be replaced with the alias. 
+ try global.rename(.empty, &o.builder); + try global.replace(global_index, &o.builder); + }, .replaced => unreachable, } } @@ -4762,36 +4677,77 @@ pub const DeclGen = struct { else => try o.lowerValue(init_val), }, &o.builder); + if (decl.val.getVariable(zcu)) |decl_var| { + const decl_namespace = zcu.namespacePtr(decl.src_namespace); + const single_threaded = decl_namespace.file_scope.mod.single_threaded; + variable_index.setThreadLocal( + if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default, + &o.builder, + ); + } + const line_number = decl.navSrcLine(zcu) + 1; - const is_internal_linkage = !o.module.decl_exports.contains(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); const owner_mod = namespace.file_scope.mod; - if (owner_mod.strip) return; + if (!owner_mod.strip) { + const debug_file = try o.getDebugFile(namespace.file_scope); + + const debug_global_var = try o.builder.debugGlobalVar( + try o.builder.metadataString(decl.name.toSlice(ip)), // Name + try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name + debug_file, // File + debug_file, // Scope + line_number, + try o.lowerDebugType(decl.typeOf(zcu)), + variable_index, + .{ .local = !decl.isExtern(zcu) }, + ); - const debug_file = try o.getDebugFile(namespace.file_scope); + const debug_expression = try o.builder.debugExpression(&.{}); - const debug_global_var = try o.builder.debugGlobalVar( - try o.builder.metadataString(decl.name.toSlice(ip)), // Name - try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name - debug_file, // File - debug_file, // Scope - line_number, - try o.lowerDebugType(decl.typeOf(zcu)), - variable_index, - .{ .local = is_internal_linkage }, - ); + const debug_global_var_expression = try o.builder.debugGlobalVarExpression( + debug_global_var, + debug_expression, + ); - const debug_expression = try o.builder.debugExpression(&.{}); + variable_index.setGlobalVariableExpression(debug_global_var_expression, &o.builder); + try o.debug_globals.append(o.gpa, debug_global_var_expression); + } + } - const debug_global_var_expression = try o.builder.debugGlobalVarExpression( - debug_global_var, - debug_expression, - ); + if (decl.isExtern(zcu)) { + const global_index = o.decl_map.get(decl_index).?; - variable_index.setGlobalVariableExpression(debug_global_var_expression, &o.builder); - try o.debug_globals.append(o.gpa, debug_global_var_expression); + const decl_name = decl_name: { + if (zcu.getTarget().isWasm() and decl.typeOf(zcu).zigTypeTag(zcu) == .Fn) { + if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| { + if (!std.mem.eql(u8, lib_name, "c")) { + break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name }); + } + } + } + break :decl_name try o.builder.strtabString(decl.name.toSlice(ip)); + }; + + if (o.builder.getGlobal(decl_name)) |other_global| { + if (other_global != global_index) { + // Another global already has this name; just use it in place of this global. 
+ try global_index.replace(other_global, &o.builder); + return; + } + } + + try global_index.rename(decl_name, &o.builder); + global_index.setLinkage(.external, &o.builder); + global_index.setUnnamedAddr(.default, &o.builder); + if (zcu.comp.config.dll_export_fns) + global_index.setDllStorageClass(.default, &o.builder); + + if (decl.val.getVariable(zcu)) |decl_var| { + if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &o.builder); + } } } }; @@ -5193,7 +5149,6 @@ pub const FuncGen = struct { const fqn = try decl.fullyQualifiedName(zcu); - const is_internal_linkage = !zcu.decl_exports.contains(decl_index); const fn_ty = try zcu.funcType(.{ .param_types = &.{}, .return_type = .void_type, @@ -5211,7 +5166,7 @@ pub const FuncGen = struct { .sp_flags = .{ .Optimized = owner_mod.optimize_mode != .Debug, .Definition = true, - .LocalToUnit = is_internal_linkage, + .LocalToUnit = true, // TODO: we can't know this at this point, since the function could be exported later! }, }, o.debug_compile_unit, -- cgit v1.2.3 From 5f03c025058ddda09bfb3eac283bb88d30ad38cc Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 29 Jun 2024 04:16:47 +0100 Subject: Zcu: key compile errors on `AnalUnit` where appropriate This change seeks to more appropriately model the way semantic analysis works by drawing a more clear line between errors emitted by analyzing a `Decl` (in future a `Cau`) and errors emitted by analyzing a runtime function. This does change a few compile errors surrounding compile logs by adding more "also here" notes. The new notes are more technically correct, but perhaps not so helpful. They're not doing enough harm for me to put extensive thought into this for now. --- src/Compilation.zig | 128 +++++++++++---------- src/Sema.zig | 27 ++--- src/Zcu.zig | 52 ++++----- src/codegen/llvm.zig | 4 +- src/codegen/spirv.zig | 2 +- src/link/Coff.zig | 7 +- src/link/Elf/ZigObject.zig | 7 +- src/link/MachO/ZigObject.zig | 7 +- src/link/Plan9.zig | 7 +- src/link/Wasm/ZigObject.zig | 7 +- ...og_of_tagged_enum_doesnt_crash_the_compiler.zig | 1 + test/cases/compile_errors/compile_log.zig | 1 + 12 files changed, 130 insertions(+), 120 deletions(-) (limited to 'src/codegen') diff --git a/src/Compilation.zig b/src/Compilation.zig index e0bbdd2e03..4c693ffb28 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2831,11 +2831,11 @@ pub fn totalErrorCount(comp: *Compilation) u32 { } } - if (comp.module) |module| { - total += module.failed_exports.count(); - total += module.failed_embed_files.count(); + if (comp.module) |zcu| { + total += zcu.failed_exports.count(); + total += zcu.failed_embed_files.count(); - for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| { + for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { if (error_msg) |_| { total += 1; } else { @@ -2851,23 +2851,27 @@ pub fn totalErrorCount(comp: *Compilation) u32 { // When a parse error is introduced, we keep all the semantic analysis for // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. 
- for (module.failed_decls.keys()) |key| { - if (module.declFileScope(key).okToReportErrors()) { + for (zcu.failed_analysis.keys()) |key| { + const decl_index = switch (key.unwrap()) { + .decl => |d| d, + .func => |ip_index| zcu.funcInfo(ip_index).owner_decl, + }; + if (zcu.declFileScope(decl_index).okToReportErrors()) { total += 1; - if (module.cimport_errors.get(key)) |errors| { + if (zcu.cimport_errors.get(key)) |errors| { total += errors.errorMessageCount(); } } } - if (module.emit_h) |emit_h| { + if (zcu.emit_h) |emit_h| { for (emit_h.failed_decls.keys()) |key| { - if (module.declFileScope(key).okToReportErrors()) { + if (zcu.declFileScope(key).okToReportErrors()) { total += 1; } } } - if (module.global_error_set.entries.len - 1 > module.error_limit) { + if (zcu.global_error_set.entries.len - 1 > zcu.error_limit) { total += 1; } } @@ -2882,8 +2886,8 @@ pub fn totalErrorCount(comp: *Compilation) u32 { // Compile log errors only count if there are no other errors. if (total == 0) { - if (comp.module) |module| { - total += @intFromBool(module.compile_log_decls.count() != 0); + if (comp.module) |zcu| { + total += @intFromBool(zcu.compile_log_sources.count() != 0); } } @@ -2934,10 +2938,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { .msg = try bundle.addString("memory allocation failure"), }); } - if (comp.module) |module| { - for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| { + if (comp.module) |zcu| { + for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { if (error_msg) |msg| { - try addModuleErrorMsg(module, &bundle, msg.*); + try addModuleErrorMsg(zcu, &bundle, msg.*); } else { // Must be ZIR errors. Note that this may include AST errors. // addZirErrorMessages asserts that the tree is loaded. @@ -2945,54 +2949,59 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { try addZirErrorMessages(&bundle, file); } } - for (module.failed_embed_files.values()) |error_msg| { - try addModuleErrorMsg(module, &bundle, error_msg.*); + for (zcu.failed_embed_files.values()) |error_msg| { + try addModuleErrorMsg(zcu, &bundle, error_msg.*); } - for (module.failed_decls.keys(), module.failed_decls.values()) |decl_index, error_msg| { + for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { + const decl_index = switch (anal_unit.unwrap()) { + .decl => |d| d, + .func => |ip_index| zcu.funcInfo(ip_index).owner_decl, + }; + // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. 
- if (module.declFileScope(decl_index).okToReportErrors()) { - try addModuleErrorMsg(module, &bundle, error_msg.*); - if (module.cimport_errors.get(decl_index)) |errors| { - for (errors.getMessages()) |err_msg_index| { - const err_msg = errors.getErrorMessage(err_msg_index); - try bundle.addRootErrorMessage(.{ - .msg = try bundle.addString(errors.nullTerminatedString(err_msg.msg)), - .src_loc = if (err_msg.src_loc != .none) blk: { - const src_loc = errors.getSourceLocation(err_msg.src_loc); - break :blk try bundle.addSourceLocation(.{ - .src_path = try bundle.addString(errors.nullTerminatedString(src_loc.src_path)), - .span_start = src_loc.span_start, - .span_main = src_loc.span_main, - .span_end = src_loc.span_end, - .line = src_loc.line, - .column = src_loc.column, - .source_line = if (src_loc.source_line != 0) try bundle.addString(errors.nullTerminatedString(src_loc.source_line)) else 0, - }); - } else .none, - }); - } + if (!zcu.declFileScope(decl_index).okToReportErrors()) continue; + + try addModuleErrorMsg(zcu, &bundle, error_msg.*); + if (zcu.cimport_errors.get(anal_unit)) |errors| { + for (errors.getMessages()) |err_msg_index| { + const err_msg = errors.getErrorMessage(err_msg_index); + try bundle.addRootErrorMessage(.{ + .msg = try bundle.addString(errors.nullTerminatedString(err_msg.msg)), + .src_loc = if (err_msg.src_loc != .none) blk: { + const src_loc = errors.getSourceLocation(err_msg.src_loc); + break :blk try bundle.addSourceLocation(.{ + .src_path = try bundle.addString(errors.nullTerminatedString(src_loc.src_path)), + .span_start = src_loc.span_start, + .span_main = src_loc.span_main, + .span_end = src_loc.span_end, + .line = src_loc.line, + .column = src_loc.column, + .source_line = if (src_loc.source_line != 0) try bundle.addString(errors.nullTerminatedString(src_loc.source_line)) else 0, + }); + } else .none, + }); } } } - if (module.emit_h) |emit_h| { + if (zcu.emit_h) |emit_h| { for (emit_h.failed_decls.keys(), emit_h.failed_decls.values()) |decl_index, error_msg| { // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (module.declFileScope(decl_index).okToReportErrors()) { - try addModuleErrorMsg(module, &bundle, error_msg.*); + if (zcu.declFileScope(decl_index).okToReportErrors()) { + try addModuleErrorMsg(zcu, &bundle, error_msg.*); } } } - for (module.failed_exports.values()) |value| { - try addModuleErrorMsg(module, &bundle, value.*); + for (zcu.failed_exports.values()) |value| { + try addModuleErrorMsg(zcu, &bundle, value.*); } - const actual_error_count = module.global_error_set.entries.len - 1; - if (actual_error_count > module.error_limit) { + const actual_error_count = zcu.global_error_set.entries.len - 1; + if (actual_error_count > zcu.error_limit) { try bundle.addRootErrorMessage(.{ - .msg = try bundle.printString("module used more errors than possible: used {d}, max {d}", .{ - actual_error_count, module.error_limit, + .msg = try bundle.printString("ZCU used more errors than possible: used {d}, max {d}", .{ + actual_error_count, zcu.error_limit, }), .notes_len = 1, }); @@ -3041,14 +3050,14 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } if (comp.module) |zcu| { - if (bundle.root_list.items.len == 0 and zcu.compile_log_decls.count() != 0) { - const values = zcu.compile_log_decls.values(); + if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) { + const values = zcu.compile_log_sources.values(); // First one will be the error; subsequent ones will be notes. 
const src_loc = values[0].src().upgrade(zcu); const err_msg: Module.ErrorMsg = .{ .src_loc = src_loc, .msg = "found compile log statement", - .notes = try gpa.alloc(Module.ErrorMsg, zcu.compile_log_decls.count() - 1), + .notes = try gpa.alloc(Module.ErrorMsg, zcu.compile_log_sources.count() - 1), }; defer gpa.free(err_msg.notes); @@ -3486,13 +3495,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo const decl = module.declPtr(decl_index); const lf = comp.bin_file.?; lf.updateDeclLineNumber(module, decl_index) catch |err| { - try module.failed_decls.ensureUnusedCapacity(gpa, 1); - module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( - gpa, - decl.navSrcLoc(module).upgrade(module), - "unable to update line number: {s}", - .{@errorName(err)}, - )); + try module.failed_analysis.ensureUnusedCapacity(gpa, 1); + module.failed_analysis.putAssumeCapacityNoClobber( + InternPool.AnalUnit.wrap(.{ .decl = decl_index }), + try Module.ErrorMsg.create( + gpa, + decl.navSrcLoc(module).upgrade(module), + "unable to update line number: {s}", + .{@errorName(err)}, + ), + ); decl.analysis = .codegen_failure; try module.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); }; diff --git a/src/Sema.zig b/src/Sema.zig index fafde99f47..4337ce8926 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2486,7 +2486,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error crash_report.compilerPanic("unexpected compile error occurred", null, null); } - try mod.failed_decls.ensureUnusedCapacity(gpa, 1); + try mod.failed_analysis.ensureUnusedCapacity(gpa, 1); try mod.failed_files.ensureUnusedCapacity(gpa, 1); if (block) |start_block| { @@ -2504,7 +2504,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error const max_references = refs: { if (mod.comp.reference_trace) |num| break :refs num; // Do not add multiple traces without explicit request. - if (mod.failed_decls.count() > 0) break :ref; + if (mod.failed_analysis.count() > 0) break :ref; break :refs default_reference_trace_len; }; @@ -2544,7 +2544,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error if (sema.func_index != .none) { ip.funcAnalysis(sema.func_index).state = .sema_failure; } - const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index); + const gop = mod.failed_analysis.getOrPutAssumeCapacity(sema.ownerUnit()); if (gop.found_existing) { // If there are multiple errors for the same Decl, prefer the first one added. 
sema.err = null; @@ -5823,11 +5823,7 @@ fn zirCompileLog( } try writer.print("\n", .{}); - const decl_index = if (sema.func_index != .none) - mod.funcOwnerDeclIndex(sema.func_index) - else - sema.owner_decl_index; - const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index); + const gop = try mod.compile_log_sources.getOrPut(sema.gpa, sema.ownerUnit()); if (!gop.found_existing) gop.value_ptr.* = .{ .base_node_inst = block.src_base_inst, .node_offset = src_node, @@ -5980,7 +5976,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr if (!comp.config.link_libc) try sema.errNote(src, msg, "libc headers not available; compilation does not link against libc", .{}); - const gop = try mod.cimport_errors.getOrPut(gpa, sema.owner_decl_index); + const gop = try mod.cimport_errors.getOrPut(gpa, sema.ownerUnit()); if (!gop.found_existing) { gop.value_ptr.* = c_import_res.errors; c_import_res.errors = std.zig.ErrorBundle.empty; @@ -38487,10 +38483,7 @@ pub fn flushExports(sema: *Sema) !void { const zcu = sema.mod; const gpa = zcu.gpa; - const unit: AnalUnit = if (sema.owner_func_index != .none) - AnalUnit.wrap(.{ .func = sema.owner_func_index }) - else - AnalUnit.wrap(.{ .decl = sema.owner_decl_index }); + const unit = sema.ownerUnit(); // There may be existing exports. For instance, a struct may export // things during both field type resolution and field default resolution. @@ -38524,6 +38517,14 @@ pub fn flushExports(sema: *Sema) !void { } } +pub fn ownerUnit(sema: Sema) AnalUnit { + if (sema.owner_func_index != .none) { + return AnalUnit.wrap(.{ .func = sema.owner_func_index }); + } else { + return AnalUnit.wrap(.{ .decl = sema.owner_decl_index }); + } +} + pub const bitCastVal = @import("Sema/bitcast.zig").bitCast; pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice; diff --git a/src/Zcu.zig b/src/Zcu.zig index 3a329f0b03..d29d2e4279 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -108,15 +108,11 @@ embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .{}, /// is not yet implemented. intern_pool: InternPool = .{}, -/// We optimize memory usage for a compilation with no compile errors by storing the -/// error messages and mapping outside of `Decl`. -/// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator. -/// Note that a Decl can succeed but the Fn it represents can fail. In this case, -/// a Decl can have a failed_decls entry but have analysis status of success. -failed_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, *ErrorMsg) = .{}, -/// Keep track of one `@compileLog` callsite per owner Decl. +/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator. +failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .{}, +/// Keep track of one `@compileLog` callsite per `AnalUnit`. /// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`. -compile_log_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, extern struct { +compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { base_node_inst: InternPool.TrackedInst.Index, node_offset: i32, pub fn src(self: @This()) LazySrcLoc { @@ -133,9 +129,9 @@ failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{}, failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{}, /// Key is index into `all_exports`. 
failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .{}, -/// If a decl failed due to a cimport error, the corresponding Clang errors +/// If analysis failed due to a cimport error, the corresponding Clang errors /// are stored here. -cimport_errors: std.AutoArrayHashMapUnmanaged(Decl.Index, std.zig.ErrorBundle) = .{}, +cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .{}, /// Key is the error name, index is the error tag value. Index 0 has a length-0 string. global_error_set: GlobalErrorSet = .{}, @@ -180,6 +176,7 @@ emit_h: ?*GlobalEmitH, test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, +/// TODO: the key here will be a `Cau.Index`. global_assembly: std.AutoArrayHashMapUnmanaged(Decl.Index, []u8) = .{}, reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { @@ -371,9 +368,9 @@ pub const Decl = struct { /// successfully complete semantic analysis. dependency_failure, /// Semantic analysis failure. - /// There will be a corresponding ErrorMsg in Zcu.failed_decls. + /// There will be a corresponding ErrorMsg in Zcu.failed_analysis. sema_failure, - /// There will be a corresponding ErrorMsg in Zcu.failed_decls. + /// There will be a corresponding ErrorMsg in Zcu.failed_analysis. codegen_failure, /// Sematic analysis and constant value codegen of this Decl has /// succeeded. However, the Decl may be outdated due to an in-progress @@ -1001,11 +998,6 @@ pub const EmbedFile = struct { /// This struct holds data necessary to construct API-facing `AllErrors.Message`. /// Its memory is managed with the general purpose allocator so that they /// can be created and destroyed in response to incremental updates. -/// In some cases, the File could have been inferred from where the ErrorMsg -/// is stored. For example, if it is stored in Module.failed_decls, then the File -/// would be determined by the Decl Scope. However, the data structure contains the field -/// anyway so that `ErrorMsg` can be reused for error notes, which may be in a different -/// file than the parent error message. It also simplifies processing of error messages. 
pub const ErrorMsg = struct { src_loc: SrcLoc, msg: []const u8, @@ -2454,8 +2446,6 @@ pub fn deinit(zcu: *Zcu) void { for (zcu.import_table.keys()) |key| { gpa.free(key); } - var failed_decls = zcu.failed_decls; - zcu.failed_decls = .{}; for (zcu.import_table.values()) |value| { value.destroy(zcu); } @@ -2473,10 +2463,10 @@ pub fn deinit(zcu: *Zcu) void { zcu.local_zir_cache.handle.close(); zcu.global_zir_cache.handle.close(); - for (failed_decls.values()) |value| { + for (zcu.failed_analysis.values()) |value| { value.destroy(gpa); } - failed_decls.deinit(gpa); + zcu.failed_analysis.deinit(gpa); if (zcu.emit_h) |emit_h| { for (emit_h.failed_decls.values()) |value| { @@ -2507,7 +2497,7 @@ pub fn deinit(zcu: *Zcu) void { } zcu.cimport_errors.deinit(gpa); - zcu.compile_log_decls.deinit(gpa); + zcu.compile_log_sources.deinit(gpa); zcu.all_exports.deinit(gpa); zcu.free_exports.deinit(gpa); @@ -3508,9 +3498,9 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { error.GenericPoison => unreachable, else => |e| { decl.analysis = .sema_failure; - try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); + try mod.failed_analysis.ensureUnusedCapacity(mod.gpa, 1); try mod.retryable_failures.append(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index })); - mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( + mod.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( mod.gpa, decl.navSrcLoc(mod).upgrade(mod), "unable to analyze: {s}", @@ -3683,9 +3673,9 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In verify.verify() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => { - try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); - zcu.failed_decls.putAssumeCapacityNoClobber( - decl_index, + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber( + AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, decl.navSrcLoc(zcu).upgrade(zcu), @@ -3709,8 +3699,8 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In func.analysis(ip).state = .codegen_failure; }, else => { - try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); - zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, decl.navSrcLoc(zcu).upgrade(zcu), "unable to codegen: {s}", @@ -5647,8 +5637,8 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { }, else => { const gpa = zcu.gpa; - try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); - zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( gpa, decl.navSrcLoc(zcu).upgrade(zcu), "unable to codegen: {s}", diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index dd6606ece7..6fe7adf33c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1689,7 +1689,7 @@ pub const Object = struct { fg.genBody(air.getMainBody()) catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try zcu.failed_decls.put(zcu.gpa, decl_index, dg.err_msg.?); + try zcu.failed_analysis.put(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = 
decl_index }), dg.err_msg.?); dg.err_msg = null; return; }, @@ -1710,7 +1710,7 @@ pub const Object = struct { dg.genDecl() catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?); + try module.failed_analysis.put(module.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); dg.err_msg = null; return; }, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index ee163c3154..54b7b381cf 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -218,7 +218,7 @@ pub const Object = struct { decl_gen.genDecl() catch |err| switch (err) { error.CodegenFail => { - try mod.failed_decls.put(mod.gpa, decl_index, decl_gen.error_msg.?); + try mod.failed_analysis.put(mod.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), decl_gen.error_msg.?); }, else => |other| { // There might be an error that happened *after* self.error_msg diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 0244d085b8..94b9ca520e 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1155,7 +1155,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: .ok => code_buffer.items, .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -1183,7 +1183,7 @@ pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclInd .ok => |atom_index| atom_index, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -1277,7 +1277,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -2751,6 +2751,7 @@ const TableSection = @import("table_section.zig").TableSection; const StringTable = @import("StringTable.zig"); const Type = @import("../type.zig").Type; const Value = @import("../Value.zig"); +const AnalUnit = InternPool.AnalUnit; pub const base_tag: link.File.Tag = .coff; diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 14040767b1..74e2039f37 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1096,7 +1096,7 @@ pub fn updateFunc( .ok => code_buffer.items, .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -1170,7 +1170,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -1307,7 +1307,7 @@ pub fn lowerUnnamedConst( .ok => |sym_index| sym_index, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -1656,4 +1656,5 @@ const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); const 
Type = @import("../../type.zig").Type; const Value = @import("../../Value.zig"); +const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 1fce9e37dd..ee5ab83b0a 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -694,7 +694,7 @@ pub fn updateFunc( .ok => code_buffer.items, .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -762,7 +762,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -1105,7 +1105,7 @@ pub fn lowerUnnamedConst( .ok => |sym_index| sym_index, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -1596,4 +1596,5 @@ const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); const Type = @import("../../type.zig").Type; const Value = @import("../../Value.zig"); +const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 60775ac662..d44da5c973 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -17,6 +17,7 @@ const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); const Type = @import("../type.zig").Type; const Value = @import("../Value.zig"); +const AnalUnit = InternPool.AnalUnit; const std = @import("std"); const builtin = @import("builtin"); @@ -449,7 +450,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: .ok => try code_buffer.toOwnedSlice(), .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -513,7 +514,7 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -550,7 +551,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index a3b8eb4459..341d3a2fc8 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -280,7 +280,7 @@ pub fn updateDecl( .ok => code_writer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -320,7 +320,7 @@ pub fn updateFunc( .ok => code_writer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, 
em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -501,7 +501,7 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d }, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return error.CodegenFail; }, } @@ -1255,4 +1255,5 @@ const Symbol = @import("Symbol.zig"); const Type = @import("../../type.zig").Type; const Value = @import("../../Value.zig"); const Wasm = @import("../Wasm.zig"); +const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig index f7de8129b7..6ba1329a2e 100644 --- a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig +++ b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig @@ -16,6 +16,7 @@ pub export fn entry() void { // target=native // // :6:5: error: found compile log statement +// :6:5: note: also here // // Compile Log Output: // @as(tmp.Bar, .{ .X = 123 }) diff --git a/test/cases/compile_errors/compile_log.zig b/test/cases/compile_errors/compile_log.zig index 6a14b78b17..ac89cfd1b3 100644 --- a/test/cases/compile_errors/compile_log.zig +++ b/test/cases/compile_errors/compile_log.zig @@ -18,6 +18,7 @@ export fn baz() void { // // :6:5: error: found compile log statement // :12:5: note: also here +// :6:5: note: also here // // Compile Log Output: // @as(*const [5:0]u8, "begin") -- cgit v1.2.3 From ded5c759f83a4da355a128dd4d7f5e22cbd3cabe Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 30 Jun 2024 03:00:07 +0100 Subject: Zcu: store `LazySrcLoc` in error messages This change modifies `Zcu.ErrorMsg` to store a `Zcu.LazySrcLoc` rather than a `Zcu.SrcLoc`. Everything else is dominoes. The reason for this change is incremental compilation. If a failed `AnalUnit` is up-to-date on an update, we want to re-use the old error messages. However, the file containing the error location may have been modified, and `SrcLoc` cannot survive such a modification. `LazySrcLoc` is designed to be correct across incremental updates. Therefore, we defer source location resolution until `Compilation` gathers the compile errors into the `ErrorBundle`. 
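In sketch form, the shape of the change (a simplified excerpt of the
real definitions in src/Zcu.zig; only the fields relevant here are
shown):

    /// A LazySrcLoc is keyed on a tracked ZIR instruction plus an
    /// offset, so it remains meaningful across incremental updates
    /// even after the file containing it has been modified.
    pub const ErrorMsg = struct {
        src_loc: LazySrcLoc, // previously `SrcLoc`
        msg: []const u8,
        notes: []ErrorMsg = &.{},
    };

Resolution to an absolute SrcLoc now happens only while the errors are
being rendered, e.g. in Compilation.addModuleErrorMsg:

    // The resolved SrcLoc is ephemeral: use it to render the message,
    // never store it across updates.
    const err_src_loc = module_err_msg.src_loc.upgrade(mod);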
--- src/Compilation.zig | 91 +++++++++++++++++++++++--------------------- src/Sema.zig | 10 ++--- src/Zcu.zig | 78 +++++++++++++++---------------------- src/arch/aarch64/CodeGen.zig | 4 +- src/arch/aarch64/Emit.zig | 2 +- src/arch/arm/CodeGen.zig | 4 +- src/arch/arm/Emit.zig | 2 +- src/arch/riscv64/CodeGen.zig | 4 +- src/arch/riscv64/Lower.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +- src/arch/sparc64/Emit.zig | 2 +- src/arch/wasm/CodeGen.zig | 6 +-- src/arch/wasm/Emit.zig | 2 +- src/arch/x86_64/CodeGen.zig | 6 +-- src/arch/x86_64/Lower.zig | 2 +- src/codegen.zig | 22 +++++------ src/codegen/c.zig | 2 +- src/codegen/llvm.zig | 2 +- src/codegen/spirv.zig | 4 +- src/link.zig | 2 +- src/link/Coff.zig | 25 +++++------- src/link/Elf.zig | 2 +- src/link/Elf/ZigObject.zig | 29 ++++++-------- src/link/MachO.zig | 2 +- src/link/MachO/ZigObject.zig | 25 +++++------- src/link/Plan9.zig | 19 +++------ src/link/Wasm.zig | 2 +- src/link/Wasm/ZigObject.zig | 14 +++---- 28 files changed, 162 insertions(+), 207 deletions(-) (limited to 'src/codegen') diff --git a/src/Compilation.zig b/src/Compilation.zig index 3c97cd3145..55084fb971 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2629,22 +2629,24 @@ fn reportMultiModuleErrors(mod: *Module) !void { for (notes[0..num_notes], file.references.items[0..num_notes], 0..) |*note, ref, i| { errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa); note.* = switch (ref) { - .import => |loc| blk: { - break :blk try Module.ErrorMsg.init( - mod.gpa, - loc, - "imported from module {s}", - .{loc.file_scope.mod.fully_qualified_name}, - ); - }, - .root => |pkg| blk: { - break :blk try Module.ErrorMsg.init( - mod.gpa, - .{ .file_scope = file, .base_node = 0, .lazy = .entire_file }, - "root of module {s}", - .{pkg.fully_qualified_name}, - ); - }, + .import => |import| try Module.ErrorMsg.init( + mod.gpa, + .{ + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, import.file, .main_struct_inst), + .offset = .{ .token_abs = import.token }, + }, + "imported from module {s}", + .{import.file.mod.fully_qualified_name}, + ), + .root => |pkg| try Module.ErrorMsg.init( + mod.gpa, + .{ + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .offset = .entire_file, + }, + "root of module {s}", + .{pkg.fully_qualified_name}, + ), }; } errdefer for (notes[0..num_notes]) |*n| n.deinit(mod.gpa); @@ -2652,7 +2654,10 @@ fn reportMultiModuleErrors(mod: *Module) !void { if (omitted > 0) { notes[num_notes] = try Module.ErrorMsg.init( mod.gpa, - .{ .file_scope = file, .base_node = 0, .lazy = .entire_file }, + .{ + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .offset = .entire_file, + }, "{} more references omitted", .{omitted}, ); @@ -2661,7 +2666,10 @@ fn reportMultiModuleErrors(mod: *Module) !void { const err = try Module.ErrorMsg.create( mod.gpa, - .{ .file_scope = file, .base_node = 0, .lazy = .entire_file }, + .{ + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .offset = .entire_file, + }, "file exists in multiple modules", .{}, ); @@ -3060,7 +3068,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { const values = zcu.compile_log_sources.values(); // First one will be the error; subsequent ones will be notes. 
- const src_loc = values[0].src().upgrade(zcu); + const src_loc = values[0].src(); const err_msg: Module.ErrorMsg = .{ .src_loc = src_loc, .msg = "found compile log statement", @@ -3070,7 +3078,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { for (values[1..], err_msg.notes) |src_info, *note| { note.* = .{ - .src_loc = src_info.src().upgrade(zcu), + .src_loc = src_info.src(), .msg = "also here", }; } @@ -3139,8 +3147,9 @@ pub fn addModuleErrorMsg( ) !void { const gpa = eb.gpa; const ip = &mod.intern_pool; - const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| { - const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); + const err_src_loc = module_err_msg.src_loc.upgrade(mod); + const err_source = err_src_loc.file_scope.getSource(gpa) catch |err| { + const file_path = try err_src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); try eb.addRootErrorMessage(.{ .msg = try eb.printString("unable to load '{s}': {s}", .{ @@ -3149,9 +3158,9 @@ pub fn addModuleErrorMsg( }); return; }; - const err_span = try module_err_msg.src_loc.span(gpa); + const err_span = try err_src_loc.span(gpa); const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main); - const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); + const file_path = try err_src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .{}; @@ -3208,7 +3217,7 @@ pub fn addModuleErrorMsg( .span_end = err_span.end, .line = @intCast(err_loc.line), .column = @intCast(err_loc.column), - .source_line = if (module_err_msg.src_loc.lazy == .entire_file) + .source_line = if (err_src_loc.lazy == .entire_file) 0 else try eb.addString(err_loc.source_line), @@ -3225,10 +3234,11 @@ pub fn addModuleErrorMsg( defer notes.deinit(gpa); for (module_err_msg.notes) |module_note| { - const source = try module_note.src_loc.file_scope.getSource(gpa); - const span = try module_note.src_loc.span(gpa); + const note_src_loc = module_note.src_loc.upgrade(mod); + const source = try note_src_loc.file_scope.getSource(gpa); + const span = try note_src_loc.span(gpa); const loc = std.zig.findLineColumn(source.bytes, span.main); - const note_file_path = try module_note.src_loc.file_scope.fullPath(gpa); + const note_file_path = try note_src_loc.file_scope.fullPath(gpa); defer gpa.free(note_file_path); const gop = try notes.getOrPutContext(gpa, .{ @@ -3522,7 +3532,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(module).upgrade(module), + decl.navSrcLoc(module), "unable to update line number: {s}", .{@errorName(err)}, ), @@ -4023,9 +4033,8 @@ fn workerAstGenFile( const res = mod.importFile(file, import_path) catch continue; if (!res.is_pkg) { res.file.addReference(mod.*, .{ .import = .{ - .file_scope = file, - .base_node = 0, - .lazy = .{ .token_abs = item.data.token }, + .file = file, + .token = item.data.token, } }) catch continue; } break :blk res; @@ -4398,20 +4407,14 @@ fn reportRetryableAstGenError( file.status = .retryable_failure; - const src_loc: Module.SrcLoc = switch (src) { + const src_loc: Module.LazySrcLoc = switch (src) { .root => .{ - .file_scope = file, - .base_node = 0, - .lazy = .entire_file, + .base_node_inst = try mod.intern_pool.trackZir(gpa, file, .main_struct_inst), + .offset = .entire_file, }, - .import => |info| blk: { - const importing_file = info.importing_file; 
- - break :blk .{ - .file_scope = importing_file, - .base_node = 0, - .lazy = .{ .token_abs = info.import_tok }, - }; + .import => |info| .{ + .base_node_inst = try mod.intern_pool.trackZir(gpa, info.importing_file, .main_struct_inst), + .offset = .{ .token_abs = info.import_tok }, }, }; diff --git a/src/Sema.zig b/src/Sema.zig index 105fedbec7..9254cf3b8e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2425,8 +2425,7 @@ pub fn errNote( comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - const zcu = sema.mod; - return zcu.errNoteNonLazy(src.upgrade(zcu), parent, format, args); + return sema.mod.errNote(src, parent, format, args); } fn addFieldErrNote( @@ -2454,7 +2453,7 @@ pub fn errMsg( args: anytype, ) Allocator.Error!*Module.ErrorMsg { assert(src.offset != .unneeded); - return Module.ErrorMsg.create(sema.gpa, src.upgrade(sema.mod), format, args); + return Module.ErrorMsg.create(sema.gpa, src, format, args); } pub fn fail( @@ -2542,7 +2541,6 @@ fn reparentOwnedErrorMsg( args: anytype, ) !void { const mod = sema.mod; - const resolved_src = src.upgrade(mod); const msg_str = try std.fmt.allocPrint(mod.gpa, format, args); const orig_notes = msg.notes.len; @@ -2553,7 +2551,7 @@ fn reparentOwnedErrorMsg( .msg = msg.msg, }; - msg.src_loc = resolved_src; + msg.src_loc = src; msg.msg = msg_str; } @@ -13883,7 +13881,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A return sema.fail(block, operand_src, "file path name cannot be empty", .{}); } - const val = mod.embedFile(block.getFileScope(mod), name, operand_src.upgrade(mod)) catch |err| switch (err) { + const val = mod.embedFile(block.getFileScope(mod), name, operand_src) catch |err| switch (err) { error.ImportOutsideModulePath => { return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name}); }, diff --git a/src/Zcu.zig b/src/Zcu.zig index 4d7508da20..508bef971a 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -289,10 +289,6 @@ pub const Export = struct { section: InternPool.OptionalNullTerminatedString = .none, visibility: std.builtin.SymbolVisibility = .default, }; - - pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc { - return exp.src.upgrade(mod); - } }; pub const Reference = struct { @@ -746,7 +742,10 @@ pub const File = struct { /// A single reference to a file. pub const Reference = union(enum) { /// The file is imported directly (i.e. not as a package) with @import. - import: SrcLoc, + import: struct { + file: *File, + token: Ast.TokenIndex, + }, /// The file is the root of a module. root: *Package.Module, }; @@ -900,7 +899,7 @@ pub const File = struct { } /// Add a reference to this file during AstGen. - pub fn addReference(file: *File, mod: Module, ref: File.Reference) !void { + pub fn addReference(file: *File, zcu: Zcu, ref: File.Reference) !void { // Don't add the same module root twice. Note that since we always add module roots at the // front of the references array (see below), this loop is actually O(1) on valid code. if (ref == .root) { @@ -917,17 +916,17 @@ pub const File = struct { // to make multi-module errors more helpful (since "root-of" notes are generally more // informative than "imported-from" notes). This path is hit very rarely, so the speed // of the insert operation doesn't matter too much. - .root => try file.references.insert(mod.gpa, 0, ref), + .root => try file.references.insert(zcu.gpa, 0, ref), // Other references we'll just put at the end. 
- else => try file.references.append(mod.gpa, ref), + else => try file.references.append(zcu.gpa, ref), } - const pkg = switch (ref) { - .import => |loc| loc.file_scope.mod, - .root => |pkg| pkg, + const mod = switch (ref) { + .import => |import| import.file.mod, + .root => |mod| mod, }; - if (pkg != file.mod) file.multi_pkg = true; + if (mod != file.mod) file.multi_pkg = true; } /// Mark this file and every file referenced by it as multi_pkg and report an @@ -967,30 +966,25 @@ pub const EmbedFile = struct { owner: *Package.Module, stat: Cache.File.Stat, val: InternPool.Index, - src_loc: SrcLoc, + src_loc: LazySrcLoc, }; /// This struct holds data necessary to construct API-facing `AllErrors.Message`. /// Its memory is managed with the general purpose allocator so that they /// can be created and destroyed in response to incremental updates. pub const ErrorMsg = struct { - src_loc: SrcLoc, + src_loc: LazySrcLoc, msg: []const u8, notes: []ErrorMsg = &.{}, reference_trace_root: AnalUnit.Optional = .none, - pub const Trace = struct { - decl: InternPool.NullTerminatedString, - src_loc: SrcLoc, - }; - pub fn create( gpa: Allocator, - src_loc: SrcLoc, + src_loc: LazySrcLoc, comptime format: []const u8, args: anytype, ) !*ErrorMsg { - assert(src_loc.lazy != .unneeded); + assert(src_loc.offset != .unneeded); const err_msg = try gpa.create(ErrorMsg); errdefer gpa.destroy(err_msg); err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args); @@ -1006,7 +1000,7 @@ pub const ErrorMsg = struct { pub fn init( gpa: Allocator, - src_loc: SrcLoc, + src_loc: LazySrcLoc, comptime format: []const u8, args: anytype, ) !ErrorMsg { @@ -1994,15 +1988,12 @@ pub const LazySrcLoc = struct { entire_file, /// The source location points to a byte offset within a source file, /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. byte_abs: u32, /// The source location points to a token within a source file, /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. token_abs: u32, /// The source location points to an AST node within a source file, /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. node_abs: u32, /// The source location points to a byte offset within a source file, /// offset from the byte offset of the base node within the file. @@ -2373,8 +2364,7 @@ pub const LazySrcLoc = struct { } /// Resolve the file and AST node of `base_node_inst` to get a resolved `SrcLoc`. - /// TODO: it is incorrect to store a `SrcLoc` anywhere due to incremental compilation. - /// Probably the type should be removed entirely and this resolution performed on-the-fly when needed. + /// The resulting `SrcLoc` should only be used ephemerally, as it is not correct across incremental updates. 
pub fn upgrade(lazy: LazySrcLoc, zcu: *Zcu) SrcLoc { const file, const base_node = resolveBaseNode(lazy.base_node_inst, zcu); return .{ @@ -3478,7 +3468,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.retryable_failures.append(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index })); mod.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( mod.gpa, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), "unable to analyze: {s}", .{@errorName(e)}, )); @@ -3655,7 +3645,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(zcu).upgrade(zcu), + decl.navSrcLoc(zcu), "invalid liveness: {s}", .{@errorName(err)}, ), @@ -3679,7 +3669,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(zcu).upgrade(zcu), + decl.navSrcLoc(zcu), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -4480,7 +4470,7 @@ pub fn embedFile( mod: *Module, cur_file: *File, import_string: []const u8, - src_loc: SrcLoc, + src_loc: LazySrcLoc, ) !InternPool.Index { const gpa = mod.gpa; @@ -4555,7 +4545,7 @@ fn newEmbedFile( sub_file_path: []const u8, resolved_path: []const u8, result: **EmbedFile, - src_loc: SrcLoc, + src_loc: LazySrcLoc, ) !InternPool.Index { const gpa = mod.gpa; const ip = &mod.intern_pool; @@ -5320,17 +5310,13 @@ pub fn initNewAnonDecl( new_decl.analysis = .complete; } -pub fn errNoteNonLazy( +pub fn errNote( mod: *Module, - src_loc: SrcLoc, + src_loc: LazySrcLoc, parent: *ErrorMsg, comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - if (src_loc.lazy == .unneeded) { - assert(parent.src_loc.lazy == .unneeded); - return; - } const msg = try std.fmt.allocPrint(mod.gpa, format, args); errdefer mod.gpa.free(msg); @@ -5458,14 +5444,12 @@ fn processExportsInner( if (gop.found_existing) { new_export.status = .failed_retryable; try zcu.failed_exports.ensureUnusedCapacity(gpa, 1); - const src_loc = new_export.getSrcLoc(zcu); - const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {}", .{ + const msg = try ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{ new_export.opts.name.fmt(&zcu.intern_pool), }); errdefer msg.destroy(gpa); const other_export = zcu.all_exports.items[gop.value_ptr.*]; - const other_src_loc = other_export.getSrcLoc(zcu); - try zcu.errNoteNonLazy(other_src_loc, msg, "other symbol here", .{}); + try zcu.errNote(other_export.src, msg, "other symbol here", .{}); zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); new_export.status = .failed; } else { @@ -5493,8 +5477,7 @@ fn handleUpdateExports( const new_export = &zcu.all_exports.items[export_idx]; new_export.status = .failed_retryable; try zcu.failed_exports.ensureUnusedCapacity(gpa, 1); - const src_loc = new_export.getSrcLoc(zcu); - const msg = try ErrorMsg.create(gpa, src_loc, "unable to export: {s}", .{ + const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{ @errorName(err), }); zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg); @@ -5658,7 +5641,7 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); 
zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create( gpa, - decl.navSrcLoc(zcu).upgrade(zcu), + decl.navSrcLoc(zcu), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -5685,9 +5668,8 @@ fn reportRetryableFileError( const err_msg = try ErrorMsg.create( mod.gpa, .{ - .file_scope = file, - .base_node = 0, - .lazy = .entire_file, + .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst), + .offset = .entire_file, }, format, args, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 48908db51b..51b62aba14 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -59,7 +59,7 @@ args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: u32, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, stack_align: u32, /// MIR Instructions @@ -331,7 +331,7 @@ const Self = @This(); pub fn generate( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index a783137a54..2588db6adc 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -22,7 +22,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, code: *std.ArrayList(u8), prev_di_line: u32, diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 60453cebe2..ae802c8f48 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -59,7 +59,7 @@ args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: u32, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, stack_align: u32, /// MIR Instructions @@ -338,7 +338,7 @@ const Self = @This(); pub fn generate( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index 3a9bfcf4b6..b85deaa3ce 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -26,7 +26,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, code: *std.ArrayList(u8), prev_di_line: u32, diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index a5cdf8621b..2bba63f616 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -59,7 +59,7 @@ args: []MCValue, ret_mcv: InstTracking, fn_type: Type, arg_index: usize, -src_loc: Zcu.SrcLoc, +src_loc: Zcu.LazySrcLoc, /// MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, @@ -696,7 +696,7 @@ const CallView = enum(u1) { pub fn generate( bin_file: *link.File, - src_loc: Zcu.SrcLoc, + src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig index dda3f3cf2a..3d3dc8513f 100644 --- a/src/arch/riscv64/Lower.zig +++ b/src/arch/riscv64/Lower.zig @@ -8,7 +8,7 @@ allocator: Allocator, mir: Mir, cc: std.builtin.CallingConvention, err_msg: ?*ErrorMsg = null, -src_loc: Zcu.SrcLoc, +src_loc: Zcu.LazySrcLoc, result_insts_len: u8 = undefined, result_relocs_len: u8 = undefined, result_insts: [ diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 14500ed329..ca1cef1250 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -64,7 
+64,7 @@ args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: usize, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, stack_align: Alignment, /// MIR Instructions @@ -263,7 +263,7 @@ const BigTomb = struct { pub fn generate( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index acd605eebc..b509bb7c79 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -24,7 +24,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, code: *std.ArrayList(u8), prev_di_line: u32, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b1ebf9126d..91d637c765 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -765,7 +765,7 @@ pub fn deinit(func: *CodeGen) void { /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { const mod = func.bin_file.base.comp.module.?; - const src_loc = func.decl.navSrcLoc(mod).upgrade(mod); + const src_loc = func.decl.navSrcLoc(mod); func.err_msg = try Zcu.ErrorMsg.create(func.gpa, src_loc, fmt, args); return error.CodegenFail; } @@ -1202,7 +1202,7 @@ fn genFunctype( pub fn generate( bin_file: *link.File, - src_loc: Zcu.SrcLoc, + src_loc: Zcu.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -3162,7 +3162,7 @@ fn lowerAnonDeclRef( } const decl_align = mod.intern_pool.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment; - const res = try func.bin_file.lowerAnonDecl(decl_val, decl_align, func.decl.navSrcLoc(mod).upgrade(mod)); + const res = try func.bin_file.lowerAnonDecl(decl_val, decl_align, func.decl.navSrcLoc(mod)); switch (res) { .ok => {}, .fail => |em| { diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index c41ea9ec55..73ef723345 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -257,7 +257,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { const comp = emit.bin_file.base.comp; const zcu = comp.module.?; const gpa = comp.gpa; - emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu).upgrade(zcu), format, args); + emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu), format, args); return error.EmitFail; } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ea6f0f8a4e..31ed0bf514 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -74,7 +74,7 @@ va_info: union { ret_mcv: InstTracking, fn_type: Type, arg_index: u32, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, eflags_inst: ?Air.Inst.Index = null, @@ -795,7 +795,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -971,7 +971,7 @@ pub fn generate( pub fn generateLazy( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, lazy_sym: link.File.LazySymbol, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 058a0550d9..852d19132d 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -8,7 +8,7 @@ allocator: Allocator, mir: 
Mir, cc: std.builtin.CallingConvention, err_msg: ?*ErrorMsg = null, -src_loc: Module.SrcLoc, +src_loc: Module.LazySrcLoc, result_insts_len: u8 = undefined, result_relocs_len: u8 = undefined, result_insts: [ diff --git a/src/codegen.zig b/src/codegen.zig index b8662ed15b..769e8f7cd5 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -47,7 +47,7 @@ pub const DebugInfoOutput = union(enum) { pub fn generateFunction( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -79,7 +79,7 @@ pub fn generateFunction( pub fn generateLazyFunction( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, lazy_sym: link.File.LazySymbol, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -105,7 +105,7 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian pub fn generateLazySymbol( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, lazy_sym: link.File.LazySymbol, // TODO don't use an "out" parameter like this; put it in the result instead alignment: *Alignment, @@ -171,7 +171,7 @@ pub fn generateLazySymbol( pub fn generateSymbol( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, val: Value, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -618,7 +618,7 @@ pub fn generateSymbol( fn lowerPtr( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ptr_val: InternPool.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -683,7 +683,7 @@ const RelocInfo = struct { fn lowerAnonDeclRef( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -730,7 +730,7 @@ fn lowerAnonDeclRef( fn lowerDeclRef( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, decl_index: InternPool.DeclIndex, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, @@ -814,7 +814,7 @@ pub const GenResult = union(enum) { fn fail( gpa: Allocator, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, comptime format: []const u8, args: anytype, ) Allocator.Error!GenResult { @@ -825,7 +825,7 @@ pub const GenResult = union(enum) { fn genDeclRef( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, val: Value, ptr_decl_index: InternPool.DeclIndex, ) CodeGenError!GenResult { @@ -931,7 +931,7 @@ fn genDeclRef( fn genUnnamedConst( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, val: Value, owner_decl_index: InternPool.DeclIndex, ) CodeGenError!GenResult { @@ -970,7 +970,7 @@ fn genUnnamedConst( pub fn genTypedValue( lf: *link.File, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, val: Value, owner_decl_index: InternPool.DeclIndex, ) CodeGenError!GenResult { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a8e58a1055..6bd8bcc6fc 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -637,7 +637,7 @@ pub const DeclGen = struct { const zcu = dg.zcu; const decl_index = dg.pass.decl; const decl = zcu.declPtr(decl_index); - const src_loc = decl.navSrcLoc(zcu).upgrade(zcu); + const src_loc = decl.navSrcLoc(zcu); dg.error_msg = try Zcu.ErrorMsg.create(dg.gpa, src_loc, format, args); return error.AnalysisFail; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 6fe7adf33c..c65158a88a 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4644,7 +4644,7 @@ pub const DeclGen = struct { const o = 
dg.object; const gpa = o.gpa; const mod = o.module; - const src_loc = dg.decl.navSrcLoc(mod).upgrade(mod); + const src_loc = dg.decl.navSrcLoc(mod); dg.err_msg = try Module.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 54b7b381cf..494ec0737e 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -415,7 +415,7 @@ const DeclGen = struct { pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); const mod = self.module; - const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod).upgrade(mod); + const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod); assert(self.error_msg == null); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args); return error.CodegenFail; @@ -6439,7 +6439,7 @@ const DeclGen = struct { // TODO: Translate proper error locations. assert(as.errors.items.len != 0); assert(self.error_msg == null); - const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod).upgrade(mod); + const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len); diff --git a/src/link.zig b/src/link.zig index 36a5cb8187..7f108c283f 100644 --- a/src/link.zig +++ b/src/link.zig @@ -646,7 +646,7 @@ pub const File = struct { base: *File, decl_val: InternPool.Index, decl_align: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !LowerResult { if (build_options.only_c) @compileError("unreachable"); switch (base.tag) { diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 94b9ca520e..366ba87509 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1144,7 +1144,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: const res = try codegen.generateFunction( &self.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -1179,7 +1179,7 @@ pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclInd const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(sym_name); const ty = val.typeOf(mod); - const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.navSrcLoc(mod).upgrade(mod))) { + const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.navSrcLoc(mod))) { .ok => |atom_index| atom_index, .fail => |em| { decl.analysis = .codegen_failure; @@ -1197,7 +1197,7 @@ const LowerConstResult = union(enum) { fail: *Module.ErrorMsg, }; -fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.SrcLoc) !LowerConstResult { +fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.LazySrcLoc) !LowerConstResult { const gpa = self.base.comp.gpa; var code_buffer = std.ArrayList(u8).init(gpa); @@ -1270,7 +1270,7 @@ pub fn updateDecl( defer code_buffer.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, 
&code_buffer, .none, .{ + const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ .parent_atom_index = atom.getSymbolIndex().?, }); const code = switch (res) { @@ -1309,14 +1309,7 @@ fn updateLazySymbolAtom( const atom = self.getAtomPtr(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &self.base, src, @@ -1560,7 +1553,7 @@ pub fn updateExports( }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(value, .none, first_exp.getSrcLoc(mod)); + const res = try self.lowerAnonDecl(value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { @@ -1585,7 +1578,7 @@ pub fn updateExports( if (!mem.eql(u8, section_name, ".text")) { try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: ExportOptions.section", .{}, )); @@ -1596,7 +1589,7 @@ pub fn updateExports( if (exp.opts.linkage == .link_once) { try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: GlobalLinkage.link_once", .{}, )); @@ -1867,7 +1860,7 @@ pub fn lowerAnonDecl( self: *Coff, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = self.base.comp.gpa; const mod = self.base.comp.module.?; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index df8e6c0dd8..c1df153083 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -552,7 +552,7 @@ pub fn lowerAnonDecl( self: *Elf, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { return self.zigObjectPtr().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc); } diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 74e2039f37..57fa610019 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -686,7 +686,7 @@ pub fn lowerAnonDecl( elf_file: *Elf, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = elf_file.base.comp.gpa; const mod = elf_file.base.comp.module.?; @@ -1074,7 +1074,7 @@ pub fn updateFunc( const res = if (decl_state) |*ds| try codegen.generateFunction( &elf_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -1084,7 +1084,7 @@ pub fn updateFunc( else try codegen.generateFunction( &elf_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -1156,13 +1156,13 @@ pub fn updateDecl( // TODO implement .debug_info for global variables const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .{ + try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .dwarf = ds, }, .{ .parent_atom_index = sym_index, }) else - try 
codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .none, .{ + try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ .parent_atom_index = sym_index, }); @@ -1217,14 +1217,7 @@ fn updateLazySymbol( break :blk try self.strtab.insert(gpa, name); }; - const src = if (sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &elf_file.base, src, @@ -1302,7 +1295,7 @@ pub fn lowerUnnamedConst( val, ty.abiAlignment(mod), elf_file.zig_data_rel_ro_section_index.?, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), )) { .ok => |sym_index| sym_index, .fail => |em| { @@ -1329,7 +1322,7 @@ fn lowerConst( val: Value, required_alignment: InternPool.Alignment, output_section_index: u32, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !LowerConstResult { const gpa = elf_file.base.comp.gpa; @@ -1395,7 +1388,7 @@ pub fn updateExports( }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.getSrcLoc(mod)); + const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { @@ -1421,7 +1414,7 @@ pub fn updateExports( try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: ExportOptions.section", .{}, )); @@ -1436,7 +1429,7 @@ pub fn updateExports( try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: GlobalLinkage.LinkOnce", .{}, )); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 3187ba528b..ed20a16abf 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -3228,7 +3228,7 @@ pub fn lowerAnonDecl( self: *MachO, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { return self.getZigObject().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc); } diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index ee5ab83b0a..861ced9214 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -572,7 +572,7 @@ pub fn lowerAnonDecl( macho_file: *MachO, decl_val: InternPool.Index, explicit_alignment: Atom.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = macho_file.base.comp.gpa; const mod = macho_file.base.comp.module.?; @@ -682,7 +682,7 @@ pub fn updateFunc( const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none; const res = try codegen.generateFunction( &macho_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -754,7 +754,7 @@ pub fn updateDecl( const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none; - const res = try codegen.generateSymbol(&macho_file.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, 
dio, .{ + const res = try codegen.generateSymbol(&macho_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{ .parent_atom_index = sym_index, }); @@ -1100,7 +1100,7 @@ pub fn lowerUnnamedConst( val, val.typeOf(mod).abiAlignment(mod), macho_file.zig_const_sect_index.?, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), )) { .ok => |sym_index| sym_index, .fail => |em| { @@ -1127,7 +1127,7 @@ fn lowerConst( val: Value, required_alignment: Atom.Alignment, output_section_index: u8, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !LowerConstResult { const gpa = macho_file.base.comp.gpa; @@ -1196,7 +1196,7 @@ pub fn updateExports( }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.getSrcLoc(mod)); + const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { @@ -1221,7 +1221,7 @@ pub fn updateExports( try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: ExportOptions.section", .{}, )); @@ -1231,7 +1231,7 @@ pub fn updateExports( if (exp.opts.linkage == .link_once) { try mod.failed_exports.putNoClobber(mod.gpa, export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: GlobalLinkage.link_once", .{}, )); @@ -1291,14 +1291,7 @@ fn updateLazySymbol( break :blk try self.strtab.insert(gpa, name); }; - const src = if (lazy_sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = lazy_sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &macho_file.base, src, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index d44da5c973..2efe569d98 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -439,7 +439,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: const res = try codegen.generateFunction( &self.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -505,7 +505,7 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn }; self.syms.items[info.sym_index.?] 
= sym; - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod).upgrade(mod), val, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), val, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = new_atom_idx, @@ -544,7 +544,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) defer code_buffer.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .{ .none = {} }, .{ + const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = @as(Atom.Index, @intCast(atom_idx)), }); const code = switch (res) { @@ -1027,7 +1027,7 @@ fn addDeclExports( { try mod.failed_exports.put(mod.gpa, export_idx, try Module.ErrorMsg.create( gpa, - mod.declPtr(decl_index).navSrcLoc(mod).upgrade(mod), + mod.declPtr(decl_index).navSrcLoc(mod), "plan9 does not support extra sections", .{}, )); @@ -1225,14 +1225,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind self.syms.items[self.getAtomPtr(atom_index).sym_index.?] = symbol; // generate the code - const src = if (sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &self.base, src, @@ -1553,7 +1546,7 @@ pub fn lowerAnonDecl( self: *Plan9, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { _ = explicit_alignment; // This is basically the same as lowerUnnamedConst. 
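
The link-backend hunks on either side of this point all make the same mechanical change: `SrcLoc` parameters and fields become `LazySrcLoc`, the eager `.upgrade(mod)` at each call site disappears, and resolution to a concrete location is deferred until a diagnostic is actually rendered (as `addModuleErrorMsg` now does). A minimal self-contained sketch of that pattern follows; `Zcu`, `SrcLoc`, and `LazySrcLoc` here are simplified, illustrative stand-ins, not the real compiler definitions:

const std = @import("std");

// Simplified, illustrative stand-ins -- not the real compiler types.
const Zcu = struct { file_paths: []const []const u8 };

const SrcLoc = struct { file_path: []const u8, byte_offset: u32 };

const LazySrcLoc = struct {
    file_index: u32,
    byte_offset: u32,

    // Resolution is deferred to diagnostic-rendering time; the result is
    // ephemeral and must not be stored across incremental updates.
    fn upgrade(lazy: LazySrcLoc, zcu: *const Zcu) SrcLoc {
        return .{
            .file_path = zcu.file_paths[lazy.file_index],
            .byte_offset = lazy.byte_offset,
        };
    }
};

// An error message now stores the lazy form, as ErrorMsg does after this change.
const ErrorMsg = struct {
    src_loc: LazySrcLoc,
    msg: []const u8,
};

test "upgrade only when rendering a diagnostic" {
    const zcu: Zcu = .{ .file_paths = &.{"main.zig"} };
    const err: ErrorMsg = .{
        .src_loc = .{ .file_index = 0, .byte_offset = 42 },
        .msg = "example diagnostic",
    };
    const resolved = err.src_loc.upgrade(&zcu); // ephemeral SrcLoc
    try std.testing.expectEqualStrings("main.zig", resolved.file_path);
    try std.testing.expectEqual(@as(u32, 42), resolved.byte_offset);
}

The stored `LazySrcLoc` remains meaningful across incremental updates; the resolved `SrcLoc` is only safe to use ephemerally, which is exactly what the updated doc comment on `upgrade` states.
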
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 164ddbc118..3befedad89 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1533,7 +1533,7 @@ pub fn lowerAnonDecl( wasm: *Wasm, decl_val: InternPool.Index, explicit_alignment: Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, decl_val, explicit_alignment, src_loc); } diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index 341d3a2fc8..ca950e5cef 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -269,7 +269,7 @@ pub fn updateDecl( const res = try codegen.generateSymbol( &wasm_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), val, &code_writer, .none, @@ -308,7 +308,7 @@ pub fn updateFunc( defer code_writer.deinit(); const result = try codegen.generateFunction( &wasm_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -439,7 +439,7 @@ pub fn lowerAnonDecl( wasm_file: *Wasm, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = wasm_file.base.comp.gpa; const gop = try zig_object.anon_decls.getOrPut(gpa, decl_val); @@ -494,7 +494,7 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d else decl.navSrcLoc(mod); - switch (try zig_object.lowerConst(wasm_file, name, val, decl_src.upgrade(mod))) { + switch (try zig_object.lowerConst(wasm_file, name, val, decl_src)) { .ok => |atom_index| { try wasm_file.getAtomPtr(parent_atom_index).locals.append(gpa, atom_index); return @intFromEnum(wasm_file.getAtom(atom_index).sym_index); @@ -512,7 +512,7 @@ const LowerConstResult = union(enum) { fail: *Module.ErrorMsg, }; -fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.SrcLoc) !LowerConstResult { +fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.LazySrcLoc) !LowerConstResult { const gpa = wasm_file.base.comp.gpa; const mod = wasm_file.base.comp.module.?; @@ -882,7 +882,7 @@ pub fn updateExports( if (exp.opts.section.toSlice(&mod.intern_pool)) |section| { try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), "Unimplemented: ExportOptions.section '{s}'", .{section}, )); @@ -915,7 +915,7 @@ pub fn updateExports( .link_once => { try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), "Unimplemented: LinkOnce", .{}, )); -- cgit v1.2.3 From 2f0f1efa6fa50ca27a44d5f7a0c38a6cafbbfb7c Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 2 Jul 2024 09:51:51 +0100 Subject: compiler: type.zig -> Type.zig --- CMakeLists.txt | 2 +- src/Air.zig | 2 +- src/Compilation.zig | 2 +- src/RangeSet.zig | 2 +- src/Sema.zig | 2 +- src/Sema/bitcast.zig | 2 +- src/Sema/comptime_ptr_access.zig | 2 +- src/Type.zig | 3617 ++++++++++++++++++++++++++++++++++++++ src/Value.zig | 2 +- src/Zcu.zig | 2 +- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/aarch64/abi.zig | 2 +- src/arch/arm/CodeGen.zig | 2 +- src/arch/arm/Emit.zig | 2 +- src/arch/arm/abi.zig | 2 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/riscv64/Mir.zig | 2 +- src/arch/riscv64/abi.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/wasm/CodeGen.zig | 2 +- src/arch/wasm/abi.zig | 2 +- 
src/arch/x86_64/CodeGen.zig | 2 +- src/arch/x86_64/abi.zig | 2 +- src/codegen.zig | 2 +- src/codegen/c.zig | 2 +- src/codegen/c/Type.zig | 2 +- src/codegen/llvm.zig | 2 +- src/codegen/spirv.zig | 2 +- src/link.zig | 2 +- src/link/C.zig | 2 +- src/link/Coff.zig | 2 +- src/link/Dwarf.zig | 2 +- src/link/Elf/ZigObject.zig | 2 +- src/link/MachO/DebugSymbols.zig | 2 +- src/link/MachO/ZigObject.zig | 2 +- src/link/Plan9.zig | 2 +- src/link/Wasm.zig | 2 +- src/link/Wasm/ZigObject.zig | 2 +- src/mutable_value.zig | 2 +- src/print_air.zig | 2 +- src/print_value.zig | 2 +- src/register_manager.zig | 2 +- src/target.zig | 2 +- src/type.zig | 3617 -------------------------------------- 44 files changed, 3659 insertions(+), 3659 deletions(-) create mode 100644 src/Type.zig delete mode 100644 src/type.zig (limited to 'src/codegen') diff --git a/CMakeLists.txt b/CMakeLists.txt index 33cdb66b5d..a33df3a096 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -522,6 +522,7 @@ set(ZIG_STAGE2_SOURCES src/Sema.zig src/Sema/bitcast.zig src/Sema/comptime_ptr_access.zig + src/Type.zig src/Value.zig src/Zcu.zig src/arch/aarch64/CodeGen.zig @@ -673,7 +674,6 @@ set(ZIG_STAGE2_SOURCES src/target.zig src/tracy.zig src/translate_c.zig - src/type.zig src/wasi_libc.zig ) diff --git a/src/Air.zig b/src/Air.zig index e70f73432f..0a05470e1c 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -9,7 +9,7 @@ const assert = std.debug.assert; const Air = @This(); const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const InternPool = @import("InternPool.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. diff --git a/src/Compilation.zig b/src/Compilation.zig index 55084fb971..b964ffd0d1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -12,7 +12,7 @@ const WaitGroup = std.Thread.WaitGroup; const ErrorBundle = std.zig.ErrorBundle; const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const target_util = @import("target.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); diff --git a/src/RangeSet.zig b/src/RangeSet.zig index 30b8c273cd..01d9157767 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -3,7 +3,7 @@ const assert = std.debug.assert; const Order = std.math.Order; const InternPool = @import("InternPool.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. 
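
Every import-site hunk in this commit is the same one-line change, and it follows from Zig's file-as-struct rule: the renamed `Type.zig` declares its field (`ip_index`) at the top level and names itself with `const Type = @This();`, so the file itself is the struct type, and importers take `@import("Type.zig")` directly instead of reaching for a `Type` declaration inside a namespace-only `type.zig`. A minimal two-file sketch of the convention (file names hypothetical):

// Point.zig -- the whole file is the struct type; fields sit at the top level.
x: i32,
y: i32,

const Point = @This();

pub fn dot(a: Point, b: Point) i64 {
    return @as(i64, a.x) * b.x + @as(i64, a.y) * b.y;
}

// main.zig -- importers use the file directly as the type.
const std = @import("std");
const Point = @import("Point.zig");

test "file-as-struct import" {
    const p: Point = .{ .x = 3, .y = 4 };
    try std.testing.expectEqual(@as(i64, 25), p.dot(p));
}

The capitalized filename matches the existing convention for struct-files (`Value.zig`, `Zcu.zig`, `Sema.zig`), which is the point of the rename.
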
diff --git a/src/Sema.zig b/src/Sema.zig index 9254cf3b8e..57b2c897a1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -176,7 +176,7 @@ const log = std.log.scoped(.sema); const Sema = @This(); const Value = @import("Value.zig"); const MutableValue = @import("mutable_value.zig").MutableValue; -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Air = @import("Air.zig"); const Zir = std.zig.Zir; const Zcu = @import("Zcu.zig"); diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index 62a0122fa1..9536ee33cd 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -767,6 +767,6 @@ const assert = std.debug.assert; const Sema = @import("../Sema.zig"); const Zcu = @import("../Zcu.zig"); const InternPool = @import("../InternPool.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const CompileError = Zcu.CompileError; diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig index 59c4c9507d..d8e638ca26 100644 --- a/src/Sema/comptime_ptr_access.zig +++ b/src/Sema/comptime_ptr_access.zig @@ -1054,7 +1054,7 @@ const ComptimeAllocIndex = InternPool.ComptimeAllocIndex; const Sema = @import("../Sema.zig"); const Block = Sema.Block; const MutableValue = @import("../mutable_value.zig").MutableValue; -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Zcu = @import("../Zcu.zig"); const LazySrcLoc = Zcu.LazySrcLoc; diff --git a/src/Type.zig b/src/Type.zig new file mode 100644 index 0000000000..96c3e055fd --- /dev/null +++ b/src/Type.zig @@ -0,0 +1,3617 @@ +//! Both types and values are canonically represented by a single 32-bit integer +//! which is an index into an `InternPool` data structure. +//! This struct abstracts around this storage by providing methods only +//! applicable to types rather than values in general. + +const std = @import("std"); +const builtin = @import("builtin"); +const Value = @import("Value.zig"); +const assert = std.debug.assert; +const Target = std.Target; +const Zcu = @import("Zcu.zig"); +/// Deprecated. 
+const Module = Zcu; +const log = std.log.scoped(.Type); +const target_util = @import("target.zig"); +const Sema = @import("Sema.zig"); +const InternPool = @import("InternPool.zig"); +const Alignment = InternPool.Alignment; +const Zir = std.zig.Zir; +const Type = @This(); + +ip_index: InternPool.Index, + +pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { + return ty.zigTypeTagOrPoison(mod) catch unreachable; +} + +pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { + return mod.intern_pool.zigTypeTagOrPoison(ty.toIntern()); +} + +pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { + return switch (self.zigTypeTag(mod)) { + .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod), + .Optional => { + return self.optionalChild(mod).baseZigTypeTag(mod); + }, + else => |t| t, + }; +} + +pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { + return switch (ty.zigTypeTag(mod)) { + .Int, + .Float, + .ComptimeFloat, + .ComptimeInt, + => true, + + .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), + + .Bool, + .Type, + .Void, + .ErrorSet, + .Fn, + .Opaque, + .AnyFrame, + .Enum, + .EnumLiteral, + => is_equality_cmp, + + .NoReturn, + .Array, + .Struct, + .Undefined, + .Null, + .ErrorUnion, + .Union, + .Frame, + => false, + + .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), + .Optional => { + if (!is_equality_cmp) return false; + return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); + }, + }; +} + +/// If it is a function pointer, returns the function type. Otherwise returns null. +pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { + if (ty.zigTypeTag(mod) != .Pointer) return null; + const elem_ty = ty.childType(mod); + if (elem_ty.zigTypeTag(mod) != .Fn) return null; + return elem_ty; +} + +/// Asserts the type is a pointer. +pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { + return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const; +} + +pub const ArrayInfo = struct { + elem_type: Type, + sentinel: ?Value = null, + len: u64, +}; + +pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { + return .{ + .len = self.arrayLen(mod), + .sentinel = self.sentinel(mod), + .elem_type = self.childType(mod), + }; +} + +pub fn ptrInfo(ty: Type, mod: *const Module) InternPool.Key.PtrType { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |p| p, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| p, + else => unreachable, + }, + else => unreachable, + }; +} + +pub fn eql(a: Type, b: Type, mod: *const Module) bool { + _ = mod; // TODO: remove this parameter + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. 
+ return a.toIntern() == b.toIntern(); +} + +pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = ty; + _ = unused_fmt_string; + _ = options; + _ = writer; + @compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()"); +} + +pub const Formatter = std.fmt.Formatter(format2); + +pub fn fmt(ty: Type, module: *Module) Formatter { + return .{ .data = .{ + .ty = ty, + .module = module, + } }; +} + +const FormatContext = struct { + ty: Type, + module: *Module, +}; + +fn format2( + ctx: FormatContext, + comptime unused_format_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, +) !void { + comptime assert(unused_format_string.len == 0); + _ = options; + return print(ctx.ty, writer, ctx.module); +} + +pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) { + return .{ .data = ty }; +} + +/// This is a debug function. In order to print types in a meaningful way +/// we also need access to the module. +pub fn dump( + start_type: Type, + comptime unused_format_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + _ = options; + comptime assert(unused_format_string.len == 0); + return writer.print("{any}", .{start_type.ip_index}); +} + +/// Prints a name suitable for `@typeName`. +/// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels. +pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + const sign_char: u8 = switch (int_type.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + return writer.print("{c}{d}", .{ sign_char, int_type.bits }); + }, + .ptr_type => { + const info = ty.ptrInfo(mod); + + if (info.sentinel != .none) switch (info.flags.size) { + .One, .C => unreachable, + .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), + .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), + } else switch (info.flags.size) { + .One => try writer.writeAll("*"), + .Many => try writer.writeAll("[*]"), + .C => try writer.writeAll("[*c]"), + .Slice => try writer.writeAll("[]"), + } + if (info.flags.alignment != .none or + info.packed_offset.host_size != 0 or + info.flags.vector_index != .none) + { + const alignment = if (info.flags.alignment != .none) + info.flags.alignment + else + Type.fromInterned(info.child).abiAlignment(mod); + try writer.print("align({d}", .{alignment.toByteUnits() orelse 0}); + + if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) { + try writer.print(":{d}:{d}", .{ + info.packed_offset.bit_offset, info.packed_offset.host_size, + }); + } + if (info.flags.vector_index == .runtime) { + try writer.writeAll(":?"); + } else if (info.flags.vector_index != .none) { + try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)}); + } + try writer.writeAll(") "); + } + if (info.flags.address_space != .generic) { + try writer.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)}); + } + if (info.flags.is_const) try writer.writeAll("const "); + if (info.flags.is_volatile) try writer.writeAll("volatile "); + if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero "); + + try print(Type.fromInterned(info.child), writer, mod); + return; + }, + .array_type => |array_type| { + if (array_type.sentinel == .none) { + try 
writer.print("[{d}]", .{array_type.len}); + try print(Type.fromInterned(array_type.child), writer, mod); + } else { + try writer.print("[{d}:{}]", .{ + array_type.len, + Value.fromInterned(array_type.sentinel).fmtValue(mod, null), + }); + try print(Type.fromInterned(array_type.child), writer, mod); + } + return; + }, + .vector_type => |vector_type| { + try writer.print("@Vector({d}, ", .{vector_type.len}); + try print(Type.fromInterned(vector_type.child), writer, mod); + try writer.writeAll(")"); + return; + }, + .opt_type => |child| { + try writer.writeByte('?'); + return print(Type.fromInterned(child), writer, mod); + }, + .error_union_type => |error_union_type| { + try print(Type.fromInterned(error_union_type.error_set_type), writer, mod); + try writer.writeByte('!'); + if (error_union_type.payload_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(Type.fromInterned(error_union_type.payload_type), writer, mod); + } + return; + }, + .inferred_error_set_type => |func_index| { + try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); + const owner_decl = mod.funcOwnerDeclPtr(func_index); + try owner_decl.renderFullyQualifiedName(mod, writer); + try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); + }, + .error_set_type => |error_set_type| { + const names = error_set_type.names; + try writer.writeAll("error{"); + for (names.get(ip), 0..) |name, i| { + if (i != 0) try writer.writeByte(','); + try writer.print("{}", .{name.fmt(ip)}); + } + try writer.writeAll("}"); + }, + .simple_type => |s| switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, + .adhoc_inferred_error_set, + => return writer.writeAll(@tagName(s)), + + .null, + .undefined, + => try writer.print("@TypeOf({s})", .{@tagName(s)}), + + .enum_literal => try writer.print("@TypeOf(.{s})", .{@tagName(s)}), + .atomic_order => try writer.writeAll("std.builtin.AtomicOrder"), + .atomic_rmw_op => try writer.writeAll("std.builtin.AtomicRmwOp"), + .calling_convention => try writer.writeAll("std.builtin.CallingConvention"), + .address_space => try writer.writeAll("std.builtin.AddressSpace"), + .float_mode => try writer.writeAll("std.builtin.FloatMode"), + .reduce_op => try writer.writeAll("std.builtin.ReduceOp"), + .call_modifier => try writer.writeAll("std.builtin.CallModifier"), + .prefetch_options => try writer.writeAll("std.builtin.PrefetchOptions"), + .export_options => try writer.writeAll("std.builtin.ExportOptions"), + .extern_options => try writer.writeAll("std.builtin.ExternOptions"), + .type_info => try writer.writeAll("std.builtin.Type"), + + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.decl.unwrap()) |decl_index| { + const decl = mod.declPtr(decl_index); + try decl.renderFullyQualifiedName(mod, writer); + } else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try namespace.renderFullyQualifiedName(mod, .empty, writer); + } else { + try writer.writeAll("@TypeOf(.{})"); + } + }, + .anon_struct_type => |anon_struct| { + if (anon_struct.types.len == 0) { + return writer.writeAll("@TypeOf(.{})"); + } + try writer.writeAll("struct{"); + for (anon_struct.types.get(ip), 
anon_struct.values.get(ip), 0..) |field_ty, val, i| { + if (i != 0) try writer.writeAll(", "); + if (val != .none) { + try writer.writeAll("comptime "); + } + if (anon_struct.names.len != 0) { + try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)}); + } + + try print(Type.fromInterned(field_ty), writer, mod); + + if (val != .none) { + try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)}); + } + } + try writer.writeAll("}"); + }, + + .union_type => { + const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .opaque_type => { + const decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .enum_type => { + const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .func_type => |fn_info| { + if (fn_info.is_noinline) { + try writer.writeAll("noinline "); + } + try writer.writeAll("fn ("); + const param_types = fn_info.param_types.get(&mod.intern_pool); + for (param_types, 0..) |param_ty, i| { + if (i != 0) try writer.writeAll(", "); + if (std.math.cast(u5, i)) |index| { + if (fn_info.paramIsComptime(index)) { + try writer.writeAll("comptime "); + } + if (fn_info.paramIsNoalias(index)) { + try writer.writeAll("noalias "); + } + } + if (param_ty == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(Type.fromInterned(param_ty), writer, mod); + } + } + if (fn_info.is_var_args) { + if (param_types.len != 0) { + try writer.writeAll(", "); + } + try writer.writeAll("..."); + } + try writer.writeAll(") "); + if (fn_info.cc != .Unspecified) { + try writer.writeAll("callconv(."); + try writer.writeAll(@tagName(fn_info.cc)); + try writer.writeAll(") "); + } + if (fn_info.return_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(Type.fromInterned(fn_info.return_type), writer, mod); + } + }, + .anyframe_type => |child| { + if (child == .none) return writer.writeAll("anyframe"); + try writer.writeAll("anyframe->"); + return print(Type.fromInterned(child), writer, mod); + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + } +} + +pub fn fromInterned(i: InternPool.Index) Type { + assert(i != .none); + return .{ .ip_index = i }; +} + +pub fn toIntern(ty: Type) InternPool.Index { + assert(ty.ip_index != .none); + return ty.ip_index; +} + +pub fn toValue(self: Type) Value { + return Value.fromInterned(self.toIntern()); +} + +const RuntimeBitsError = Module.CompileError || error{NeedLazy}; + +/// true if and only if the type takes up space in memory at runtime. +/// There are two reasons a type will return false: +/// * the type is a comptime-only type. For example, the type `type` itself. +/// - note, however, that a struct can have mixed fields and only the non-comptime-only +/// fields will count towards the ABI size. For example, `struct {T: type, x: i32}` +/// hasRuntimeBits()=true and abiSize()=4 +/// * the type has only one possible value, making its ABI size 0. +/// - an enum with an explicit tag type has the ABI size of the integer tag type, +/// making it one-possible-value only if the integer tag type has 0 bits. 
+/// When `ignore_comptime_only` is true, then types that are comptime-only +/// may return false positives. +pub fn hasRuntimeBitsAdvanced( + ty: Type, + mod: *Module, + ignore_comptime_only: bool, + strat: AbiAlignmentAdvancedStrat, +) RuntimeBitsError!bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + // False because it is a comptime-only type. + .empty_struct_type => false, + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.bits != 0, + .ptr_type => { + // Pointers to zero-bit types still have a runtime address; however, pointers + // to comptime-only types do not, with the exception of function pointers. + if (ignore_comptime_only) return true; + return switch (strat) { + .sema => |sema| !(try sema.typeRequiresComptime(ty)), + .eager => !comptimeOnly(ty, mod), + .lazy => error.NeedLazy, + }; + }, + .anyframe_type => true, + .array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and + try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .vector_type => |vector_type| return vector_type.len > 0 and + try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .opt_type => |child| { + const child_ty = Type.fromInterned(child); + if (child_ty.isNoReturn(mod)) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) return true; + return switch (strat) { + .sema => |sema| !(try sema.typeRequiresComptime(child_ty)), + .eager => !comptimeOnly(child_ty, mod), + .lazy => error.NeedLazy, + }; + }, + .error_union_type, + .error_set_type, + .inferred_error_set_type, + => true, + + // These are function *bodies*, not pointers. + // They return false here because they are comptime-only types. + // Special exceptions have to be made when emitting functions due to + // this returning false. + .func_type => false, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .anyerror, + .adhoc_inferred_error_set, + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => true, + + // These are false because they are comptime-only types. + .void, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + => false, + + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. 
+ return true; + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(struct_type.haveFieldTypes(ip)), + .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy, + } + for (0..struct_type.field_types.len) |i| { + if (struct_type.comptime_bits.getBit(ip, i)) continue; + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + .anon_struct_type => |tuple| { + for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { + if (val != .none) continue; // comptime field + if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; + } + return false; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + switch (union_type.flagsPtr(ip).runtime_tag) { + .none => { + if (union_type.flagsPtr(ip).status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. + union_type.flagsPtr(ip).assumed_runtime_bits = true; + return true; + } + }, + .safety, .tagged => { + const tag_ty = union_type.tagTypePtr(ip).*; + // tag_ty will be `none` if this union's tag type is not resolved yet, + // in which case we want control flow to continue down below. + if (tag_ty != .none and + try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + { + return true; + } + }, + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()), + .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes()) + return error.NeedLazy, + } + for (0..union_type.field_types.len) |field_index| { + const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + + .opaque_type => true, + .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +/// true if and only if the type has a well-defined memory layout +/// readFrom/writeToMemory are supported only for types with a well- +/// defined memory layout +pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .int_type, + .vector_type, + => true, + + .error_union_type, + .error_set_type, + .inferred_error_set_type, + .anon_struct_type, + .opaque_type, + .anyframe_type, + // These are function bodies, not function pointers. 
+        .func_type,
+        => false,
+
+        .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(mod),
+        .opt_type => ty.isPtrLikeOptional(mod),
+        .ptr_type => |ptr_type| ptr_type.flags.size != .Slice,
+
+        .simple_type => |t| switch (t) {
+            .f16,
+            .f32,
+            .f64,
+            .f80,
+            .f128,
+            .usize,
+            .isize,
+            .c_char,
+            .c_short,
+            .c_ushort,
+            .c_int,
+            .c_uint,
+            .c_long,
+            .c_ulong,
+            .c_longlong,
+            .c_ulonglong,
+            .c_longdouble,
+            .bool,
+            .void,
+            => true,
+
+            .anyerror,
+            .adhoc_inferred_error_set,
+            .anyopaque,
+            .atomic_order,
+            .atomic_rmw_op,
+            .calling_convention,
+            .address_space,
+            .float_mode,
+            .reduce_op,
+            .call_modifier,
+            .prefetch_options,
+            .export_options,
+            .extern_options,
+            .type,
+            .comptime_int,
+            .comptime_float,
+            .noreturn,
+            .null,
+            .undefined,
+            .enum_literal,
+            .type_info,
+            .generic_poison,
+            => false,
+        },
+        .struct_type => {
+            const struct_type = ip.loadStructType(ty.toIntern());
+            // Structs with no fields have a well-defined layout of no bits.
+            return struct_type.layout != .auto or struct_type.field_types.len == 0;
+        },
+        .union_type => {
+            const union_type = ip.loadUnionType(ty.toIntern());
+            return switch (union_type.flagsPtr(ip).runtime_tag) {
+                .none, .safety => union_type.flagsPtr(ip).layout != .auto,
+                .tagged => false,
+            };
+        },
+        .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
+            .auto => false,
+            .explicit, .nonexhaustive => true,
+        },
+
+        // values, not types
+        .undef,
+        .simple_value,
+        .variable,
+        .extern_func,
+        .func,
+        .int,
+        .err,
+        .error_union,
+        .enum_literal,
+        .enum_tag,
+        .empty_enum_value,
+        .float,
+        .ptr,
+        .slice,
+        .opt,
+        .aggregate,
+        .un,
+        // memoization, not types
+        .memoized_call,
+        => unreachable,
+    };
+}
+
+pub fn hasRuntimeBits(ty: Type, mod: *Module) bool {
+    return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable;
+}
+
+pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
+    return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable;
+}
+
+pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool {
+    return ty.fnHasRuntimeBitsAdvanced(mod, null) catch unreachable;
+}
+
+/// Determines whether a function type has runtime bits, i.e. whether a
+/// function with this type can exist at runtime.
+/// Asserts that `ty` is a function type.
+/// If `opt_sema` is not provided, asserts that the return type is sufficiently resolved.
+pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool {
+    const fn_info = mod.typeToFunc(ty).?;
+    if (fn_info.is_generic) return false;
+    if (fn_info.is_var_args) return true;
+    if (fn_info.cc == .Inline) return false;
+    return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema);
+}
+
+pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
+    switch (ty.zigTypeTag(mod)) {
+        .Fn => return ty.fnHasRuntimeBits(mod),
+        else => return ty.hasRuntimeBits(mod),
+    }
+}
+
+/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
+pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
+    return switch (ty.zigTypeTag(mod)) {
+        .Fn => true,
+        else => return ty.hasRuntimeBitsIgnoreComptime(mod),
+    };
+}
+
+pub fn isNoReturn(ty: Type, mod: *Module) bool {
+    return mod.intern_pool.isNoReturn(ty.toIntern());
+}
+
+/// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
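+/// For example, `*align(8) u8` yields an alignment of 8, while a plain
+/// `*u8` yields the ABI alignment of `u8`.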
+pub fn ptrAlignment(ty: Type, mod: *Module) Alignment { + return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; +} + +pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| { + if (ptr_type.flags.alignment != .none) + return ptr_type.flags.alignment; + + if (opt_sema) |sema| { + const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema }); + return res.scalar; + } + + return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + }, + .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema), + else => unreachable, + }; +} + +pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.address_space, + .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space, + else => unreachable, + }; +} + +/// Never returns `none`. Asserts that all necessary type resolution is already done. +pub fn abiAlignment(ty: Type, mod: *Module) Alignment { + return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; +} + +/// May capture a reference to `ty`. +/// Returned value has type `comptime_int`. +pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { + .val => |val| return val, + .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0), + } +} + +pub const AbiAlignmentAdvanced = union(enum) { + scalar: Alignment, + val: Value, +}; + +pub const AbiAlignmentAdvancedStrat = union(enum) { + eager, + lazy, + sema: *Sema, +}; + +/// If you pass `eager` you will get back `scalar` and assert the type is resolved. +/// In this case there will be no error, guaranteed. +/// If you pass `lazy` you may get back `scalar` or `val`. +/// If `val` is returned, a reference to `ty` has been captured. +/// If you pass `sema` you will get back `scalar` and resolve the type if +/// necessary, possibly returning a CompileError. 
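+///
+/// For example, a caller that knows the type is already resolved can use:
+///   const a = (try ty.abiAlignmentAdvanced(mod, .eager)).scalar;
+/// which is exactly what the `abiAlignment` wrapper above does.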
+pub fn abiAlignmentAdvanced( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, +) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + const use_llvm = mod.comp.config.use_llvm; + const ip = &mod.intern_pool; + + const opt_sema = switch (strat) { + .sema => |sema| sema, + else => null, + }; + + switch (ty.toIntern()) { + .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + return .{ .scalar = intAbiAlignment(int_type.bits, target, use_llvm) }; + }, + .ptr_type, .anyframe_type => { + return .{ .scalar = ptrAbiAlignment(target) }; + }, + .array_type => |array_type| { + return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat); + }, + .vector_type => |vector_type| { + if (vector_type.len == 0) return .{ .scalar = .@"1" }; + switch (mod.comp.getZigBackend()) { + else => { + const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema)); + if (elem_bits == 0) return .{ .scalar = .@"1" }; + const bytes = ((elem_bits * vector_type.len) + 7) / 8; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return .{ .scalar = Alignment.fromByteUnits(alignment) }; + }, + .stage2_c => { + return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat); + }, + .stage2_x86_64 => { + if (vector_type.child == .bool_type) { + if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; + if (vector_type.len > 128 and std.Target.x86.featureSetHas(target.cpu.features, .avx2)) return .{ .scalar = .@"32" }; + if (vector_type.len > 64) return .{ .scalar = .@"16" }; + const bytes = std.math.divCeil(u32, vector_type.len, 8) catch unreachable; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return .{ .scalar = Alignment.fromByteUnits(alignment) }; + } + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + if (elem_bytes == 0) return .{ .scalar = .@"1" }; + const bytes = elem_bytes * vector_type.len; + if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; + if (bytes > 16 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{ .scalar = .@"32" }; + return .{ .scalar = .@"16" }; + }, + } + }, + + .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), + .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)), + + .error_set_type, .inferred_error_set_type => { + const bits = mod.errorSetBits(); + if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; + }, + + // represents machine code; not a pointer + .func_type => return .{ .scalar = target_util.defaultFunctionAlignment(target) }, + + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .anyopaque, + => return .{ .scalar = .@"1" }, + + .usize, + .isize, + => return .{ .scalar = intAbiAlignment(target.ptrBitWidth(), target, use_llvm) }, + + .export_options, + .extern_options, + .type_info, + => return .{ .scalar = ptrAbiAlignment(target) }, + + .c_char => return .{ .scalar = cTypeAlign(target, 
.char) }, + .c_short => return .{ .scalar = cTypeAlign(target, .short) }, + .c_ushort => return .{ .scalar = cTypeAlign(target, .ushort) }, + .c_int => return .{ .scalar = cTypeAlign(target, .int) }, + .c_uint => return .{ .scalar = cTypeAlign(target, .uint) }, + .c_long => return .{ .scalar = cTypeAlign(target, .long) }, + .c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) }, + .c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) }, + .c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) }, + .c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) }, + + .f16 => return .{ .scalar = .@"2" }, + .f32 => return .{ .scalar = cTypeAlign(target, .float) }, + .f64 => switch (target.c_type_bit_size(.double)) { + 64 => return .{ .scalar = cTypeAlign(target, .double) }, + else => return .{ .scalar = .@"8" }, + }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return .{ .scalar = cTypeAlign(target, .longdouble) }, + else => { + const u80_ty: Type = .{ .ip_index = .u80_type }; + return .{ .scalar = abiAlignment(u80_ty, mod) }; + }, + }, + .f128 => switch (target.c_type_bit_size(.longdouble)) { + 128 => return .{ .scalar = cTypeAlign(target, .longdouble) }, + else => return .{ .scalar = .@"16" }, + }, + + .anyerror, .adhoc_inferred_error_set => { + const bits = mod.errorSetBits(); + if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; + }, + + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return .{ .scalar = .@"1" }, + + .noreturn => unreachable, + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.layout == .@"packed") { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))), + }, + .eager => {}, + } + return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; + } + + const flags = struct_type.flagsPtr(ip).*; + if (flags.alignment != .none) return .{ .scalar = flags.alignment }; + + return switch (strat) { + .eager => unreachable, // struct alignment not resolved + .sema => |sema| .{ + .scalar = try sema.resolveStructAlignment(ty.toIntern(), struct_type), + }, + .lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }; + }, + .anon_struct_type => |tuple| { + var big_align: Alignment = .@"1"; + for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { + if (val != .none) continue; // comptime field + switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) { + .scalar => |field_align| big_align = big_align.max(field_align), + .val => switch (strat) { + .eager => unreachable, // field type alignment not resolved + .sema => unreachable, // passed to abiAlignmentAdvanced above + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }, + } + } + return .{ .scalar = big_align }; + }, + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + const flags = union_type.flagsPtr(ip).*; + if (flags.alignment != .none) return .{ .scalar = 
flags.alignment }; + + if (!union_type.haveLayout(ip)) switch (strat) { + .eager => unreachable, // union layout not resolved + .sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) }, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }; + + return .{ .scalar = union_type.flagsPtr(ip).alignment }; + }, + .opaque_type => return .{ .scalar = .@"1" }, + .enum_type => return .{ + .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod), + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + } +} + +fn abiAlignmentAdvancedErrorUnion( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, + payload_ty: Type, +) Module.CompileError!AbiAlignmentAdvanced { + // This code needs to be kept in sync with the equivalent switch prong + // in abiSizeAdvanced. + const code_align = abiAlignment(Type.anyerror, mod); + switch (strat) { + .eager, .sema => { + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) { + return .{ .scalar = code_align }; + } + return .{ .scalar = code_align.max( + (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, + ) }; + }, + .lazy => { + switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) }, + .val => {}, + } + return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }; + }, + } +} + +fn abiAlignmentAdvancedOptional( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, +) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + const child_type = ty.optionalChild(mod); + + switch (child_type.zigTypeTag(mod)) { + .Pointer => return .{ .scalar = ptrAbiAlignment(target) }, + .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), + .NoReturn => return .{ .scalar = .@"1" }, + else => {}, + } + + switch (strat) { + .eager, .sema => { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) { + return .{ .scalar = .@"1" }; + } + return child_type.abiAlignmentAdvanced(mod, strat); + }, + .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| return .{ .scalar = x.max(.@"1") }, + .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }, + } +} + +/// May capture a reference to `ty`. +pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { + switch (try ty.abiSizeAdvanced(mod, .lazy)) { + .val => |val| return val, + .scalar => |x| return mod.intValue(Type.comptime_int, x), + } +} + +/// Asserts the type has the ABI size already resolved. 
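+/// (For a type whose layout may not be resolved yet, use `abiSizeAdvanced`
+/// with a `.sema` or `.lazy` strategy instead.)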
+/// Types that return false for hasRuntimeBits() return 0. +pub fn abiSize(ty: Type, mod: *Module) u64 { + return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; +} + +const AbiSizeAdvanced = union(enum) { + scalar: u64, + val: Value, +}; + +/// If you pass `eager` you will get back `scalar` and assert the type is resolved. +/// In this case there will be no error, guaranteed. +/// If you pass `lazy` you may get back `scalar` or `val`. +/// If `val` is returned, a reference to `ty` has been captured. +/// If you pass `sema` you will get back `scalar` and resolve the type if +/// necessary, possibly returning a CompileError. +pub fn abiSizeAdvanced( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, +) Module.CompileError!AbiSizeAdvanced { + const target = mod.getTarget(); + const use_llvm = mod.comp.config.use_llvm; + const ip = &mod.intern_pool; + + switch (ty.toIntern()) { + .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, + + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target, use_llvm) }; + }, + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + }, + .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .array_type => |array_type| { + const len = array_type.lenIncludingSentinel(); + if (len == 0) return .{ .scalar = 0 }; + switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| return .{ .scalar = len * elem_size }, + .val => switch (strat) { + .sema, .eager => unreachable, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }, + } + }, + .vector_type => |vector_type| { + const opt_sema = switch (strat) { + .sema => |sema| sema, + .eager => null, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }; + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| x, + .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }; + const total_bytes = switch (mod.comp.getZigBackend()) { + else => total_bytes: { + const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema); + const total_bits = elem_bits * vector_type.len; + break :total_bytes (total_bits + 7) / 8; + }, + .stage2_c => total_bytes: { + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + break :total_bytes elem_bytes * vector_type.len; + }, + .stage2_x86_64 => total_bytes: { + if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable; + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + break :total_bytes elem_bytes * vector_type.len; + }, + }; + return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) }; + }, + + .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), + + .error_set_type, .inferred_error_set_type => { + const bits 
= mod.errorSetBits(); + if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; + }, + + .error_union_type => |error_union_type| { + const payload_ty = Type.fromInterned(error_union_type.payload_type); + // This code needs to be kept in sync with the equivalent switch prong + // in abiAlignmentAdvanced. + const code_size = abiSize(Type.anyerror, mod); + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) { + // Same as anyerror. + return AbiSizeAdvanced{ .scalar = code_size }; + } + const code_align = abiAlignment(Type.anyerror, mod); + const payload_align = abiAlignment(payload_ty, mod); + const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }, + }; + + var size: u64 = 0; + if (code_align.compare(.gt, payload_align)) { + size += code_size; + size = payload_align.forward(size); + size += payload_size; + size = code_align.forward(size); + } else { + size += payload_size; + size = code_align.forward(size); + size += code_size; + size = payload_align.forward(size); + } + return AbiSizeAdvanced{ .scalar = size }; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + => return AbiSizeAdvanced{ .scalar = 1 }, + + .f16 => return AbiSizeAdvanced{ .scalar = 2 }, + .f32 => return AbiSizeAdvanced{ .scalar = 4 }, + .f64 => return AbiSizeAdvanced{ .scalar = 8 }, + .f128 => return AbiSizeAdvanced{ .scalar = 16 }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + else => { + const u80_ty: Type = .{ .ip_index = .u80_type }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; + }, + }, + + .usize, + .isize, + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, + .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, + .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, + .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, + .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, + .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, + .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, + .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, + .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, + .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + + .anyopaque, + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return AbiSizeAdvanced{ .scalar = 0 }, + + .anyerror, .adhoc_inferred_error_set => { + const bits = 
mod.errorSetBits(); + if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; + }, + + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + + .type_info => unreachable, + .noreturn => unreachable, + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => switch (struct_type.layout) { + .@"packed" => { + if (struct_type.backingIntType(ip).* == .none) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))), + }; + }, + .auto, .@"extern" => { + if (!struct_type.haveLayout(ip)) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))), + }; + }, + }, + .eager => {}, + } + switch (struct_type.layout) { + .@"packed" => return .{ + .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod), + }, + .auto, .@"extern" => { + assert(struct_type.haveLayout(ip)); + return .{ .scalar = struct_type.size(ip).* }; + }, + } + }, + .anon_struct_type => |tuple| { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy, .eager => {}, + } + const field_count = tuple.types.len; + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))), + }, + .eager => {}, + } + + assert(union_type.haveLayout(ip)); + return .{ .scalar = union_type.size(ip).* }; + }, + .opaque_type => unreachable, // no size available + .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + } +} + +fn abiSizeAdvancedOptional( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, +) Module.CompileError!AbiSizeAdvanced { + const child_ty = ty.optionalChild(mod); + + if (child_ty.isNoReturn(mod)) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + + if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) return AbiSizeAdvanced{ .scalar = 1 }; + + if (ty.optionalReprIsPayload(mod)) { + return abiSizeAdvanced(child_ty, mod, strat); + } + + const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => return .{ .val = 
Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }, + }; + + // Optional types are represented as a struct with the child type as the first + // field and a boolean as the second. Since the child type's abi alignment is + // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal + // to the child type's ABI alignment. + return AbiSizeAdvanced{ + .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size, + }; +} + +pub fn ptrAbiAlignment(target: Target) Alignment { + return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8)); +} + +pub fn intAbiSize(bits: u16, target: Target, use_llvm: bool) u64 { + return intAbiAlignment(bits, target, use_llvm).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8))); +} + +pub fn intAbiAlignment(bits: u16, target: Target, use_llvm: bool) Alignment { + return switch (target.cpu.arch) { + .x86 => switch (bits) { + 0 => .none, + 1...8 => .@"1", + 9...16 => .@"2", + 17...64 => .@"4", + else => .@"16", + }, + .x86_64 => switch (bits) { + 0 => .none, + 1...8 => .@"1", + 9...16 => .@"2", + 17...32 => .@"4", + 33...64 => .@"8", + else => switch (target_util.zigBackend(target, use_llvm)) { + .stage2_x86_64 => .@"8", + else => .@"16", + }, + }, + else => return Alignment.fromByteUnits(@min( + std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))), + maxIntAlignment(target, use_llvm), + )), + }; +} + +pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 { + return switch (target.cpu.arch) { + .avr => 1, + .msp430 => 2, + .xcore => 4, + + .arm, + .armeb, + .thumb, + .thumbeb, + .hexagon, + .mips, + .mipsel, + .powerpc, + .powerpcle, + .r600, + .amdgcn, + .riscv32, + .sparc, + .sparcel, + .s390x, + .lanai, + .wasm32, + .wasm64, + => 8, + + // For these, LLVMABIAlignmentOfType(i128) reports 8. Note that 16 + // is a relevant number in three cases: + // 1. Different machine code instruction when loading into SIMD register. + // 2. The C ABI wants 16 for extern structs. + // 3. 16-byte cmpxchg needs 16-byte alignment. + // Same logic for powerpc64, mips64, sparc64. + .powerpc64, + .powerpc64le, + .mips64, + .mips64el, + .sparc64, + => switch (target.ofmt) { + .c => 16, + else => 8, + }, + + .x86_64 => switch (target_util.zigBackend(target, use_llvm)) { + .stage2_x86_64 => 8, + else => 16, + }, + + // Even LLVMABIAlignmentOfType(i128) agrees on these targets. + .x86, + .aarch64, + .aarch64_be, + .aarch64_32, + .riscv64, + .bpfel, + .bpfeb, + .nvptx, + .nvptx64, + => 16, + + // Below this comment are unverified but based on the fact that C requires + // int128_t to be 16 bytes aligned, it's a safe default. + .spu_2, + .csky, + .arc, + .m68k, + .tce, + .tcele, + .le32, + .amdil, + .hsail, + .spir, + .kalimba, + .renderscript32, + .spirv, + .spirv32, + .shave, + .le64, + .amdil64, + .hsail64, + .spir64, + .renderscript64, + .ve, + .spirv64, + .dxil, + .loongarch32, + .loongarch64, + .xtensa, + => 16, + }; +} + +pub fn bitSize(ty: Type, mod: *Module) u64 { + return bitSizeAdvanced(ty, mod, null) catch unreachable; +} + +/// If you pass `opt_sema`, any recursive type resolutions will happen if +/// necessary, possibly returning a CompileError. Passing `null` instead asserts +/// the type is fully resolved, and there will be no error, guaranteed. 
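+///
+/// Note that the bit size can be smaller than 8 times the ABI size; for
+/// example, `u7` has a bit size of 7 but an ABI size of 1 byte.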
+pub fn bitSizeAdvanced( + ty: Type, + mod: *Module, + opt_sema: ?*Sema, +) Module.CompileError!u64 { + const target = mod.getTarget(); + const ip = &mod.intern_pool; + + const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; + + switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| return int_type.bits, + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth(), + }, + .anyframe_type => return target.ptrBitWidth(), + + .array_type => |array_type| { + const len = array_type.lenIncludingSentinel(); + if (len == 0) return 0; + const elem_ty = Type.fromInterned(array_type.child); + const elem_size = @max( + (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0, + (try elem_ty.abiSizeAdvanced(mod, strat)).scalar, + ); + if (elem_size == 0) return 0; + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + return (len - 1) * 8 * elem_size + elem_bit_size; + }, + .vector_type => |vector_type| { + const child_ty = Type.fromInterned(vector_type.child); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + }, + .opt_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + + .error_set_type, .inferred_error_set_type => return mod.errorSetBits(), + + .error_union_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, + + .usize, + .isize, + => return target.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, + + .anyerror, + .adhoc_inferred_error_set, + => return mod.errorSetBits(), + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + + .atomic_order => unreachable, + .atomic_rmw_op => unreachable, + .calling_convention => unreachable, + .address_space => unreachable, + .float_mode => unreachable, + .reduce_op => unreachable, + .call_modifier => unreachable, + .prefetch_options => unreachable, + .export_options => unreachable, + .extern_options => unreachable, + .type_info => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + const is_packed = struct_type.layout == .@"packed"; + if (opt_sema) |sema| { + try sema.resolveTypeFields(ty); + if (is_packed) try sema.resolveTypeLayout(ty); + } + if (is_packed) { + return try 
Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema); + } + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + }, + + .anon_struct_type => { + if (opt_sema) |sema| try sema.resolveTypeFields(ty); + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + const is_packed = ty.containerLayout(mod) == .@"packed"; + if (opt_sema) |sema| { + try sema.resolveTypeFields(ty); + if (is_packed) try sema.resolveTypeLayout(ty); + } + if (!is_packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + assert(union_type.flagsPtr(ip).status.haveFieldTypes()); + + var size: u64 = 0; + for (0..union_type.field_types.len) |field_index| { + const field_ty = union_type.field_types.get(ip)[field_index]; + size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema)); + } + + return size; + }, + .opaque_type => unreachable, + .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema), + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + } +} + +/// Returns true if the type's layout is already resolved and it is safe +/// to use `abiSize`, `abiAlignment` and `bitSize` on it. +pub fn layoutIsResolved(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip), + .union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip), + .array_type => |array_type| { + if (array_type.lenIncludingSentinel() == 0) return true; + return Type.fromInterned(array_type.child).layoutIsResolved(mod); + }, + .opt_type => |child| Type.fromInterned(child).layoutIsResolved(mod), + .error_union_type => |k| Type.fromInterned(k.payload_type).layoutIsResolved(mod), + else => true, + }; +} + +pub fn isSinglePointer(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_info| ptr_info.flags.size == .One, + else => false, + }; +} + +/// Asserts `ty` is a pointer. +pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { + return ptrSizeOrNull(ty, mod).?; +} + +/// Returns `null` if `ty` is not a pointer. 
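+/// For example, `*i32` yields `.One`, `[*]u8` yields `.Many`, `[]u8` yields
+/// `.Slice`, and a non-pointer type such as `i32` yields `null`.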
+pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_info| ptr_info.flags.size, + else => null, + }; +} + +pub fn isSlice(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .Slice, + else => false, + }; +} + +pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type { + return Type.fromInterned(mod.intern_pool.slicePtrType(ty.toIntern())); +} + +pub fn isConstPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_const, + else => false, + }; +} + +pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { + return isVolatilePtrIp(ty, &mod.intern_pool); +} + +pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool { + return switch (ip.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_volatile, + else => false, + }; +} + +pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_allowzero, + .opt_type => true, + else => false, + }; +} + +pub fn isCPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + else => false, + }; +} + +pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => false, + .One, .Many, .C => true, + }, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| switch (p.flags.size) { + .Slice, .C => false, + .Many, .One => !p.flags.is_allowzero, + }, + else => false, + }, + else => false, + }; +} + +/// For pointer-like optionals, returns true, otherwise returns the allowzero property +/// of pointers. +pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool { + if (ty.isPtrLikeOptional(mod)) { + return true; + } + return ty.ptrInfo(mod).flags.is_allowzero; +} + +/// See also `isPtrLikeOptional`. +pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opt_type => |child_type| child_type == .anyerror_type or switch (mod.intern_pool.indexToKey(child_type)) { + .ptr_type => |ptr_type| ptr_type.flags.size != .C and !ptr_type.flags.is_allowzero, + .error_set_type, .inferred_error_set_type => true, + else => false, + }, + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + else => false, + }; +} + +/// Returns true if the type is optional and would be lowered to a single pointer +/// address value, using 0 for null. Note that this returns true for C pointers. +/// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. +pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice, .C => false, + .Many, .One => !ptr_type.flags.is_allowzero, + }, + else => false, + }, + else => false, + }; +} + +/// For *[N]T, returns [N]T. +/// For *T, returns T. +/// For [*]T, returns T. 
+pub fn childType(ty: Type, mod: *const Module) Type { + return childTypeIp(ty, &mod.intern_pool); +} + +pub fn childTypeIp(ty: Type, ip: *const InternPool) Type { + return Type.fromInterned(ip.childType(ty.toIntern())); +} + +/// For *[N]T, returns T. +/// For ?*T, returns T. +/// For ?*[N]T, returns T. +/// For ?[*]T, returns T. +/// For *T, returns T. +/// For [*]T, returns T. +/// For [N]T, returns T. +/// For []T, returns T. +/// For anyframe->T, returns T. +pub fn elemType2(ty: Type, mod: *const Module) Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One => Type.fromInterned(ptr_type.child).shallowElemType(mod), + .Many, .C, .Slice => Type.fromInterned(ptr_type.child), + }, + .anyframe_type => |child| { + assert(child != .none); + return Type.fromInterned(child); + }, + .vector_type => |vector_type| Type.fromInterned(vector_type.child), + .array_type => |array_type| Type.fromInterned(array_type.child), + .opt_type => |child| Type.fromInterned(mod.intern_pool.childType(child)), + else => unreachable, + }; +} + +fn shallowElemType(child_ty: Type, mod: *const Module) Type { + return switch (child_ty.zigTypeTag(mod)) { + .Array, .Vector => child_ty.childType(mod), + else => child_ty, + }; +} + +/// For vectors, returns the element type. Otherwise returns self. +pub fn scalarType(ty: Type, mod: *Module) Type { + return switch (ty.zigTypeTag(mod)) { + .Vector => ty.childType(mod), + else => ty, + }; +} + +/// Asserts that the type is an optional. +/// Note that for C pointers this returns the type unmodified. +pub fn optionalChild(ty: Type, mod: *const Module) Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opt_type => |child| Type.fromInterned(child), + .ptr_type => |ptr_type| b: { + assert(ptr_type.flags.size == .C); + break :b ty; + }, + else => unreachable, + }; +} + +/// Returns the tag type of a union, if the type is a union and it has a tag type. +/// Otherwise, returns `null`. +pub fn unionTagType(ty: Type, mod: *Module) ?Type { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .union_type => {}, + else => return null, + } + const union_type = ip.loadUnionType(ty.toIntern()); + switch (union_type.flagsPtr(ip).runtime_tag) { + .tagged => { + assert(union_type.flagsPtr(ip).status.haveFieldTypes()); + return Type.fromInterned(union_type.enum_tag_ty); + }, + else => return null, + } +} + +/// Same as `unionTagType` but includes safety tag. +/// Codegen should use this version. +pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + if (!union_type.hasTag(ip)) return null; + assert(union_type.haveFieldTypes(ip)); + return Type.fromInterned(union_type.enum_tag_ty); + }, + else => null, + }; +} + +/// Asserts the type is a union; returns the tag type, even if the tag will +/// not be stored at runtime. 
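+/// For example, for an auto-layout `union { a: u8 }` this returns the
+/// compiler-generated tag type even though no tag is stored in memory.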
+pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type { + const union_obj = mod.typeToUnion(ty).?; + return Type.fromInterned(union_obj.enum_tag_ty); +} + +pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) ?Type { + const ip = &mod.intern_pool; + const union_obj = mod.typeToUnion(ty).?; + const union_fields = union_obj.field_types.get(ip); + const index = mod.unionTagFieldIndex(union_obj, enum_tag) orelse return null; + return Type.fromInterned(union_fields[index]); +} + +pub fn unionFieldTypeByIndex(ty: Type, index: usize, mod: *Module) Type { + const ip = &mod.intern_pool; + const union_obj = mod.typeToUnion(ty).?; + return Type.fromInterned(union_obj.field_types.get(ip)[index]); +} + +pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { + const union_obj = mod.typeToUnion(ty).?; + return mod.unionTagFieldIndex(union_obj, enum_tag); +} + +pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + const union_obj = mod.typeToUnion(ty).?; + for (union_obj.field_types.get(ip)) |field_ty| { + if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false; + } + return true; +} + +/// Returns the type used for backing storage of this union during comptime operations. +/// Asserts the type is either an extern or packed union. +pub fn unionBackingType(ty: Type, mod: *Module) !Type { + return switch (ty.containerLayout(mod)) { + .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }), + .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))), + .auto => unreachable, + }; +} + +pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout { + const ip = &mod.intern_pool; + const union_obj = ip.loadUnionType(ty.toIntern()); + return mod.getUnionLayout(union_obj); +} + +pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).layout, + .anon_struct_type => .auto, + .union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout, + else => unreachable, + }; +} + +/// Asserts that the type is an error union. +pub fn errorUnionPayload(ty: Type, mod: *Module) Type { + return Type.fromInterned(mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type); +} + +/// Asserts that the type is an error union. +pub fn errorUnionSet(ty: Type, mod: *Module) Type { + return Type.fromInterned(mod.intern_pool.errorUnionSet(ty.toIntern())); +} + +/// Returns false for unresolved inferred error sets. +pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type, .adhoc_inferred_error_set_type => false, + else => switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .none, .anyerror_type => false, + else => |t| ip.indexToKey(t).error_set_type.names.len == 0, + }, + else => unreachable, + }, + }; +} + +/// Returns true if it is an error set that includes anyerror, false otherwise. +/// Note that the result may be a false negative if the type did not get error set +/// resolution prior to this call. 
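+/// For example, `anyerror` returns true and `error{Foo}` returns false; an
+/// inferred error set returns true only once it has resolved to `anyerror`.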
+pub fn isAnyError(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type => true, + .adhoc_inferred_error_set_type => false, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type, + else => false, + }, + }; +} + +pub fn isError(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { + .ErrorUnion, .ErrorSet => true, + else => false, + }; +} + +/// Returns whether ty, which must be an error set, includes an error `name`. +/// Might return a false negative if `ty` is an inferred error set and not fully +/// resolved yet. +pub fn errorSetHasFieldIp( + ip: *const InternPool, + ty: InternPool.Index, + name: InternPool.NullTerminatedString, +) bool { + return switch (ty) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null, + }, + else => unreachable, + }, + }; +} + +/// Returns whether ty, which must be an error set, includes an error `name`. +/// Might return a false negative if `ty` is an inferred error set and not fully +/// resolved yet. +pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |error_set_type| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return error_set_type.nameIndex(ip, field_name_interned) != null; + }, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null; + }, + }, + else => unreachable, + }, + }; +} + +/// Asserts the type is an array or vector or struct. +pub fn arrayLen(ty: Type, mod: *const Module) u64 { + return ty.arrayLenIp(&mod.intern_pool); +} + +pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 { + return ip.aggregateTypeLen(ty.toIntern()); +} + +pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 { + return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern()); +} + +pub fn vectorLen(ty: Type, mod: *const Module) u32 { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .vector_type => |vector_type| vector_type.len, + .anon_struct_type => |tuple| @intCast(tuple.types.len), + else => unreachable, + }; +} + +/// Asserts the type is an array, pointer or vector. +pub fn sentinel(ty: Type, mod: *const Module) ?Value { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .vector_type, + .struct_type, + .anon_struct_type, + => null, + + .array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, + .ptr_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, + + else => unreachable, + }; +} + +/// Returns true if and only if the type is a fixed-width integer. 
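+/// For example, `u8`, `i64`, and `c_int` are fixed-width, while
+/// `comptime_int` is not.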
+pub fn isInt(self: Type, mod: *const Module) bool { + return self.toIntern() != .comptime_int_type and + mod.intern_pool.isIntegerType(self.toIntern()); +} + +/// Returns true if and only if the type is a fixed-width, signed integer. +pub fn isSignedInt(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .c_char_type => mod.getTarget().charSignedness() == .signed, + .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.signedness == .signed, + else => false, + }, + }; +} + +/// Returns true if and only if the type is a fixed-width, unsigned integer. +pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .c_char_type => mod.getTarget().charSignedness() == .unsigned, + .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.signedness == .unsigned, + else => false, + }, + }; +} + +/// Returns true for integers, enums, error sets, and packed structs. +/// If this function returns true, then intInfo() can be called on the type. +pub fn isAbiInt(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Int, .Enum, .ErrorSet => true, + .Struct => ty.containerLayout(mod) == .@"packed", + else => false, + }; +} + +/// Asserts the type is an integer, enum, error set, or vector of one of them. +pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { + const ip = &mod.intern_pool; + const target = mod.getTarget(); + var ty = starting_ty; + + while (true) switch (ty.toIntern()) { + .anyerror_type, .adhoc_inferred_error_set_type => { + return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; + }, + .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, + .c_char_type => return .{ .signedness = mod.getTarget().charSignedness(), .bits = target.c_type_bit_size(.char) }, + .c_short_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, + .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, + .c_int_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, + .c_uint_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, + .c_long_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, + .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, + .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, + .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| return int_type, + .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*), + .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), + .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child), + + .error_set_type, .inferred_error_set_type => { + return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; + }, + + .anon_struct_type => unreachable, + + .ptr_type => unreachable, + .anyframe_type => unreachable, + .array_type => unreachable, + + .opt_type => unreachable, + 
.error_union_type => unreachable, + .func_type => unreachable, + .simple_type => unreachable, // handled via Index enum tag above + + .union_type => unreachable, + .opaque_type => unreachable, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +pub fn isNamedInt(ty: Type) bool { + return switch (ty.toIntern()) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => true, + + else => false, + }; +} + +/// Returns `false` for `comptime_float`. +pub fn isRuntimeFloat(ty: Type) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + => true, + + else => false, + }; +} + +/// Returns `true` for `comptime_float`. +pub fn isAnyFloat(ty: Type) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_float_type, + => true, + + else => false, + }; +} + +/// Asserts the type is a fixed-size float or comptime_float. +/// Returns 128 for comptime_float types. +pub fn floatBits(ty: Type, target: Target) u16 { + return switch (ty.toIntern()) { + .f16_type => 16, + .f32_type => 32, + .f64_type => 64, + .f80_type => 80, + .f128_type, .comptime_float_type => 128, + .c_longdouble_type => target.c_type_bit_size(.longdouble), + + else => unreachable, + }; +} + +/// Asserts the type is a function or a function pointer. +pub fn fnReturnType(ty: Type, mod: *Module) Type { + return Type.fromInterned(mod.intern_pool.funcTypeReturnType(ty.toIntern())); +} + +/// Asserts the type is a function. +pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention { + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc; +} + +pub fn isValidParamType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { + .Opaque, .NoReturn => false, + else => true, + }; +} + +pub fn isValidReturnType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { + .Opaque => false, + else => true, + }; +} + +/// Asserts the type is a function. +pub fn fnIsVarArgs(ty: Type, mod: *Module) bool { + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args; +} + +pub fn isNumeric(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_int_type, + .comptime_float_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => true, + + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => true, + else => false, + }, + }; +} + +/// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which +/// resolves field types rather than asserting they are already resolved. 
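+/// For example, `u0` has the single possible value 0 and `void` has the
+/// single possible value `{}`, while `u8` has no single possible value, so
+/// `null` is returned for it.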
+pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { + var ty = starting_type; + const ip = &mod.intern_pool; + while (true) switch (ty.toIntern()) { + .empty_struct_type => return Value.empty_struct, + + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return try mod.intValue(ty, 0); + } else { + return null; + } + }, + + .ptr_type, + .error_union_type, + .func_type, + .anyframe_type, + .error_set_type, + .inferred_error_set_type, + => return null, + + inline .array_type, .vector_type => |seq_type, seq_tag| { + const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; + if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = &.{} }, + } }))); + if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| { + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, + } }))); + } + return null; + }, + .opt_type => |child| { + if (child == .noreturn_type) { + return try mod.nullValue(ty); + } else { + return null; + } + }, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .enum_literal, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + .adhoc_inferred_error_set, + => return null, + + .void => return Value.void, + .noreturn => return Value.@"unreachable", + .null => return Value.null, + .undefined => return Value.undef, + + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + assert(struct_type.haveFieldTypes(ip)); + if (struct_type.knownNonOpv(ip)) + return null; + const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len); + defer mod.gpa.free(field_vals); + for (field_vals, 0..) |*field_val, i_usize| { + const i: u32 = @intCast(i_usize); + if (struct_type.fieldIsComptime(ip, i)) { + assert(struct_type.haveFieldInits(ip)); + field_val.* = struct_type.field_inits.get(ip)[i]; + continue; + } + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + if (try field_ty.onePossibleValue(mod)) |field_opv| { + field_val.* = field_opv.toIntern(); + } else return null; + } + + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } }))); + }, + + .anon_struct_type => |tuple| { + for (tuple.values.get(ip)) |val| { + if (val == .none) return null; + } + // In this case the struct has all comptime-known fields and + // therefore has one possible value. 
+ // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip)); + defer mod.gpa.free(duped_values); + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = duped_values }, + } }))); + }, + + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse + return null; + if (union_obj.field_types.len == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return Value.fromInterned(only); + } + const only_field_ty = union_obj.field_types.get(ip)[0]; + const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse + return null; + const only = try mod.intern(.{ .un = .{ + .ty = ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), + } }); + return Value.fromInterned(only); + }, + .opaque_type => return null, + .enum_type => { + const enum_type = ip.loadEnumType(ty.toIntern()); + switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = int_opv.toIntern(), + } }); + return Value.fromInterned(only); + } + + return null; + }, + .auto, .explicit => { + if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null; + + switch (enum_type.names.len) { + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return Value.fromInterned(only); + }, + 1 => { + if (enum_type.values.len == 0) { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), + } }); + return Value.fromInterned(only); + } else { + return Value.fromInterned(enum_type.values.get(ip)[0]); + } + }, + else => return null, + } + }, + } + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +/// During semantic analysis, instead call `Sema.typeRequiresComptime` which +/// resolves field types rather than asserting they are already resolved. +pub fn comptimeOnly(ty: Type, mod: *Module) bool { + return ty.comptimeOnlyAdvanced(mod, null) catch unreachable; +} + +/// `generic_poison` will return false. +/// May return false negatives when structs and unions are having their field types resolved. +/// If `opt_sema` is not provided, asserts that the type is sufficiently resolved. 
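+/// For example, `type`, `comptime_int`, and `[]const type` are comptime-only,
+/// while `u8`, `*const fn () void` (a function pointer), and packed structs
+/// are not.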
+pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .empty_struct_type => false, + + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => false, + .ptr_type => |ptr_type| { + const child_ty = Type.fromInterned(ptr_type.child); + switch (child_ty.zigTypeTag(mod)) { + .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema), + .Opaque => return false, + else => return child_ty.comptimeOnlyAdvanced(mod, opt_sema), + } + }, + .anyframe_type => |child| { + if (child == .none) return false; + return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema); + }, + .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema), + .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema), + .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema), + .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema), + + .error_set_type, + .inferred_error_set_type, + => false, + + // These are function bodies, not function pointers. + .func_type => true, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .adhoc_inferred_error_set, + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + // packed structs cannot be comptime-only because they have a well-defined + // memory layout and every field has a well-defined bit pattern. + if (struct_type.layout == .@"packed") + return false; + + // A struct with no fields is not comptime-only. + return switch (struct_type.flagsPtr(ip).requires_comptime) { + .no, .wip => false, + .yes => true, + .unknown => { + // The type is not resolved; assert that we have a Sema. + const sema = opt_sema.?; + + if (struct_type.flagsPtr(ip).field_types_wip) + return false; + + struct_type.flagsPtr(ip).requires_comptime = .wip; + errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; + + try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); + + for (0..struct_type.field_types.len) |i_usize| { + const i: u32 = @intCast(i_usize); + if (struct_type.fieldIsComptime(ip, i)) continue; + const field_ty = struct_type.field_types.get(ip)[i]; + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { + // Note that this does not cause the layout to + // be considered resolved. Comptime-only types + // still maintain a layout of their + // runtime-known fields. 
+ struct_type.flagsPtr(ip).requires_comptime = .yes; + return true; + } + } + + struct_type.flagsPtr(ip).requires_comptime = .no; + return false; + }, + }; + }, + + .anon_struct_type => |tuple| { + for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { + const have_comptime_val = val != .none; + if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true; + } + return false; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + switch (union_type.flagsPtr(ip).requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + // The type is not resolved; assert that we have a Sema. + const sema = opt_sema.?; + + if (union_type.flagsPtr(ip).status == .field_types_wip) + return false; + + union_type.flagsPtr(ip).requires_comptime = .wip; + errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; + + try sema.resolveTypeFieldsUnion(ty, union_type); + + for (0..union_type.field_types.len) |field_idx| { + const field_ty = union_type.field_types.get(ip)[field_idx]; + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { + union_type.flagsPtr(ip).requires_comptime = .yes; + return true; + } + } + + union_type.flagsPtr(ip).requires_comptime = .no; + return false; + }, + } + }, + + .opaque_type => false, + + .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema), + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +pub fn isVector(ty: Type, mod: *const Module) bool { + return ty.zigTypeTag(mod) == .Vector; +} + +/// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len. +pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 { + if (!ty.isVector(zcu)) return 0; + const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type; + return v.len * Type.fromInterned(v.child).bitSize(zcu); +} + +pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Array, .Vector => true, + else => false, + }; +} + +pub fn isIndexable(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Array, .Vector => true, + .Pointer => switch (ty.ptrSize(mod)) { + .Slice, .Many, .C => true, + .One => switch (ty.childType(mod).zigTypeTag(mod)) { + .Array, .Vector => true, + .Struct => ty.childType(mod).isTuple(mod), + else => false, + }, + }, + .Struct => ty.isTuple(mod), + else => false, + }; +} + +pub fn indexableHasLen(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Array, .Vector => true, + .Pointer => switch (ty.ptrSize(mod)) { + .Many, .C => false, + .Slice => true, + .One => switch (ty.childType(mod).zigTypeTag(mod)) { + .Array, .Vector => true, + .Struct => ty.childType(mod).isTuple(mod), + else => false, + }, + }, + .Struct => ty.isTuple(mod), + else => false, + }; +} + +/// Asserts that the type can have a namespace. +pub fn getNamespaceIndex(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex { + return ty.getNamespace(zcu).?; +} + +/// Returns null if the type has no namespace. 
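+/// A `null` result means the type can never have a namespace (e.g. `u32`);
+/// a non-null `.none` means the type is namespace-capable but has no
+/// namespace attached (e.g. an anonymous struct type).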
+pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex {
+    const ip = &zcu.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace,
+        .struct_type => ip.loadStructType(ty.toIntern()).namespace,
+        .union_type => ip.loadUnionType(ty.toIntern()).namespace,
+        .enum_type => ip.loadEnumType(ty.toIntern()).namespace,
+
+        .anon_struct_type => .none,
+        .simple_type => |s| switch (s) {
+            .anyopaque,
+            .atomic_order,
+            .atomic_rmw_op,
+            .calling_convention,
+            .address_space,
+            .float_mode,
+            .reduce_op,
+            .call_modifier,
+            .prefetch_options,
+            .export_options,
+            .extern_options,
+            .type_info,
+            => .none,
+            else => null,
+        },
+
+        else => null,
+    };
+}
+
+/// Works for integers and vectors of integers.
+pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
+    const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
+    return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+        .ty = dest_ty.toIntern(),
+        .storage = .{ .repeated_elem = scalar.toIntern() },
+    } }))) else scalar;
+}
+
+/// Asserts that the type is an integer.
+pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
+    const info = ty.intInfo(mod);
+    if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0);
+    if (info.bits == 0) return mod.intValue(dest_ty, -1);
+
+    if (std.math.cast(u6, info.bits - 1)) |shift| {
+        const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
+        return mod.intValue(dest_ty, n);
+    }
+
+    var res = try std.math.big.int.Managed.init(mod.gpa);
+    defer res.deinit();
+
+    try res.setTwosCompIntLimit(.min, info.signedness, info.bits);
+
+    return mod.intValue_big(dest_ty, res.toConst());
+}
+
+/// Works for integers and vectors of integers.
+/// The returned Value will have type dest_ty.
+pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
+    const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
+    return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
+        .ty = dest_ty.toIntern(),
+        .storage = .{ .repeated_elem = scalar.toIntern() },
+    } }))) else scalar;
+}
+
+/// The returned Value will have type dest_ty.
+pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
+    const info = ty.intInfo(mod);
+
+    switch (info.bits) {
+        0 => return switch (info.signedness) {
+            .signed => try mod.intValue(dest_ty, -1),
+            .unsigned => try mod.intValue(dest_ty, 0),
+        },
+        1 => return switch (info.signedness) {
+            .signed => try mod.intValue(dest_ty, 0),
+            .unsigned => try mod.intValue(dest_ty, 1),
+        },
+        else => {},
+    }
+
+    if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
+        .signed => {
+            const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
+            return mod.intValue(dest_ty, n);
+        },
+        .unsigned => {
+            const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
+            return mod.intValue(dest_ty, n);
+        },
+    };
+
+    var res = try std.math.big.int.Managed.init(mod.gpa);
+    defer res.deinit();
+
+    try res.setTwosCompIntLimit(.max, info.signedness, info.bits);
+
+    return mod.intValue_big(dest_ty, res.toConst());
+}
+
+/// Asserts the type is an enum or a union.
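+/// For `enum(u8)` this returns `u8`; for a tagged union, it returns the
+/// integer tag type of the union's enum tag type.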
+pub fn intTagType(ty: Type, mod: *Module) Type {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod),
+        .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
+        else => unreachable,
+    };
+}
+
+pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
+            .nonexhaustive => true,
+            .auto, .explicit => false,
+        },
+        else => false,
+    };
+}
+
+/// Asserts that `ty` is an error set and not `anyerror`.
+/// Asserts that `ty` is resolved if it is an inferred error set.
+pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .error_set_type => |x| x.names,
+        .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
+            .none => unreachable, // unresolved inferred error set
+            .anyerror_type => unreachable,
+            else => |t| ip.indexToKey(t).error_set_type.names,
+        },
+        else => unreachable,
+    };
+}
+
+pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
+    return mod.intern_pool.loadEnumType(ty.toIntern()).names;
+}
+
+pub fn enumFieldCount(ty: Type, mod: *Module) usize {
+    return mod.intern_pool.loadEnumType(ty.toIntern()).names.len;
+}
+
+pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
+    const ip = &mod.intern_pool;
+    return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index];
+}
+
+pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 {
+    const ip = &mod.intern_pool;
+    const enum_type = ip.loadEnumType(ty.toIntern());
+    return enum_type.nameIndex(ip, field_name);
+}
+
+/// Asserts `ty` is an enum. `enum_tag` can either be a value of `ty` or an
+/// integer of the enum's tag type which represents the tag value. Returns
+/// the field index in declaration order, or `null` if `enum_tag` does not
+/// match any field.
+pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
+    const ip = &mod.intern_pool;
+    const enum_type = ip.loadEnumType(ty.toIntern());
+    const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
+        .int => enum_tag.toIntern(),
+        .enum_tag => |info| info.int,
+        else => unreachable,
+    };
+    assert(ip.typeOf(int_tag) == enum_type.tag_ty);
+    return enum_type.tagValueIndex(ip, int_tag);
+}
+
+/// Returns `.none` in the case of a tuple, which uses the integer index as the field name.
+pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
+        .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index),
+        else => unreachable,
+    };
+}
+
+pub fn structFieldCount(ty: Type, mod: *Module) u32 {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
+        .anon_struct_type => |anon_struct| anon_struct.types.len,
+        else => unreachable,
+    };
+}
+
+/// Supports structs and unions.
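+/// Anonymous struct (tuple) types are also supported.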
+pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]), + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + return Type.fromInterned(union_obj.field_types.get(ip)[index]); + }, + .anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]), + else => unreachable, + }; +} + +pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { + return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable; +} + +pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment { + const ip = &zcu.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + assert(struct_type.layout != .@"packed"); + const explicit_align = struct_type.fieldAlign(ip, index); + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); + if (opt_sema) |sema| { + return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout); + } else { + return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout); + } + }, + .anon_struct_type => |anon_struct| { + return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar; + }, + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + if (opt_sema) |sema| { + return sema.unionFieldAlignment(union_obj, @intCast(index)); + } else { + return zcu.unionFieldNormalAlignment(union_obj, @intCast(index)); + } + }, + else => unreachable, + } +} + +pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + const val = struct_type.fieldInit(ip, index); + // TODO: avoid using `unreachable` to indicate this. + if (val == .none) return Value.@"unreachable"; + return Value.fromInterned(val); + }, + .anon_struct_type => |anon_struct| { + const val = anon_struct.values.get(ip)[index]; + // TODO: avoid using `unreachable` to indicate this. 
+ if (val == .none) return Value.@"unreachable"; + return Value.fromInterned(val); + }, + else => unreachable, + } +} + +pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.fieldIsComptime(ip, index)) { + assert(struct_type.haveFieldInits(ip)); + return Value.fromInterned(struct_type.field_inits.get(ip)[index]); + } else { + return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod); + } + }, + .anon_struct_type => |tuple| { + const val = tuple.values.get(ip)[index]; + if (val == .none) { + return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod); + } else { + return Value.fromInterned(val); + } + }, + else => unreachable, + } +} + +pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index), + .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none, + else => unreachable, + }; +} + +pub const FieldOffset = struct { + field: usize, + offset: u64, +}; + +/// Supports structs and unions. +pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + assert(struct_type.haveLayout(ip)); + assert(struct_type.layout != .@"packed"); + return struct_type.offsets.get(ip)[index]; + }, + + .anon_struct_type => |tuple| { + var offset: u64 = 0; + var big_align: Alignment = .none; + + for (tuple.types.get(ip), tuple.values.get(ip), 0..) 
|field_ty, field_val, i| { + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) { + // comptime field + if (i == index) return offset; + continue; + } + + const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + big_align = big_align.max(field_align); + offset = field_align.forward(offset); + if (i == index) return offset; + offset += Type.fromInterned(field_ty).abiSize(mod); + } + offset = big_align.max(.@"1").forward(offset); + return offset; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + if (!union_type.hasTag(ip)) + return 0; + const layout = mod.getUnionLayout(union_type); + if (layout.tag_align.compare(.gte, layout.payload_align)) { + // {Tag, Payload} + return layout.payload_align.forward(layout.tag_size); + } else { + // {Payload, Tag} + return 0; + } + }, + + else => unreachable, + } +} + +pub fn getOwnerDecl(ty: Type, mod: *Module) InternPool.DeclIndex { + return ty.getOwnerDeclOrNull(mod) orelse unreachable; +} + +pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?InternPool.DeclIndex { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).decl.unwrap(), + .union_type => ip.loadUnionType(ty.toIntern()).decl, + .opaque_type => ip.loadOpaqueType(ty.toIntern()).decl, + .enum_type => ip.loadEnumType(ty.toIntern()).decl, + else => null, + }; +} + +pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Module.LazySrcLoc { + const ip = &zcu.intern_pool; + return .{ + .base_node_inst = switch (ip.indexToKey(ty.toIntern())) { + .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { + .declared => |d| d.zir_index, + .reified => |r| r.zir_index, + .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, + .empty_struct => return null, + }, + else => return null, + }, + .offset = Module.LazySrcLoc.Offset.nodeOffset(0), + }; +} + +pub fn srcLoc(ty: Type, zcu: *Zcu) Module.LazySrcLoc { + return ty.srcLocOrNull(zcu).?; +} + +pub fn isGenericPoison(ty: Type) bool { + return ty.toIntern() == .generic_poison_type; +} + +pub fn isTuple(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.layout == .@"packed") return false; + if (struct_type.decl == .none) return false; + return struct_type.flagsPtr(ip).is_tuple; + }, + .anon_struct_type => |anon_struct| anon_struct.names.len == 0, + else => false, + }; +} + +pub fn isAnonStruct(ty: Type, mod: *Module) bool { + if (ty.toIntern() == .empty_struct_type) return true; + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0, + else => false, + }; +} + +pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.layout == .@"packed") return false; + if (struct_type.decl == .none) return false; + return struct_type.flagsPtr(ip).is_tuple; + }, + .anon_struct_type => true, + else => false, + }; +} + +pub fn isSimpleTuple(ty: Type, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, + else => false, + }; +} + +pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool { + return switch 
(mod.intern_pool.indexToKey(ty.toIntern())) {
+        .anon_struct_type => true,
+        else => false,
+    };
+}
+
+/// Traverses optional child types and error union payloads until the type
+/// is neither an optional nor an error union. For `E!?u32`, returns `u32`;
+/// for `*u8`, returns `*u8`.
+pub fn optEuBaseType(ty: Type, mod: *Module) Type {
+    var cur = ty;
+    while (true) switch (cur.zigTypeTag(mod)) {
+        .Optional => cur = cur.optionalChild(mod),
+        .ErrorUnion => cur = cur.errorUnionPayload(mod),
+        else => return cur,
+    };
+}
+
+pub fn toUnsigned(ty: Type, mod: *Module) !Type {
+    return switch (ty.zigTypeTag(mod)) {
+        .Int => mod.intType(.unsigned, ty.intInfo(mod).bits),
+        .Vector => try mod.vectorType(.{
+            .len = ty.vectorLen(mod),
+            .child = (try ty.childType(mod).toUnsigned(mod)).toIntern(),
+        }),
+        else => unreachable,
+    };
+}
+
+pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
+    const ip = &zcu.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
+        .union_type => ip.loadUnionType(ty.toIntern()).zir_index,
+        .enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
+        .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
+        else => null,
+    };
+}
+
+pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 {
+    const ip = &zcu.intern_pool;
+    const tracked = switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
+            .declared => |d| d.zir_index,
+            .reified => |r| r.zir_index,
+            .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
+            .empty_struct => return null,
+        },
+        else => return null,
+    };
+    const info = tracked.resolveFull(&zcu.intern_pool);
+    const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?];
+    assert(file.zir_loaded);
+    const zir = file.zir;
+    const inst = zir.instructions.get(@intFromEnum(info.inst));
+    assert(inst.tag == .extended);
+    return switch (inst.data.extended.opcode) {
+        .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
+        .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
+        .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
+        .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
+        .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
+        else => unreachable,
+    };
+}
+
+/// Given a namespace type, returns its list of captured values.
+pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
+    const ip = &zcu.intern_pool;
+    return switch (ip.indexToKey(ty.toIntern())) {
+        .struct_type => ip.loadStructType(ty.toIntern()).captures,
+        .union_type => ip.loadUnionType(ty.toIntern()).captures,
+        .enum_type => ip.loadEnumType(ty.toIntern()).captures,
+        .opaque_type => ip.loadOpaqueType(ty.toIntern()).captures,
+        else => unreachable,
+    };
+}
+
+pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
+    var cur_ty: Type = ty;
+    var cur_len: u64 = 1;
+    while (cur_ty.zigTypeTag(zcu) == .Array) {
+        cur_len *= cur_ty.arrayLenIncludingSentinel(zcu);
+        cur_ty = cur_ty.childType(zcu);
+    }
+    return .{ cur_ty, cur_len };
+}
+
+pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) {
+    /// The result is a bit-pointer with the same value and a new packed offset.
+ bit_ptr: InternPool.Key.PtrType.PackedOffset, + /// The result is a standard pointer. + byte_ptr: struct { + /// The byte offset of the field pointer from the parent pointer value. + offset: u64, + /// The alignment of the field pointer type. + alignment: InternPool.Alignment, + }, +} { + comptime assert(Type.packed_struct_layout_version == 2); + + const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); + const field_ty = struct_ty.structFieldType(field_idx, zcu); + + var bit_offset: u16 = 0; + var running_bits: u16 = 0; + for (0..struct_ty.structFieldCount(zcu)) |i| { + const f_ty = struct_ty.structFieldType(i, zcu); + if (i == field_idx) { + bit_offset = running_bits; + } + running_bits += @intCast(f_ty.bitSize(zcu)); + } + + const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0) + .{ parent_ptr_info.packed_offset.host_size, parent_ptr_info.packed_offset.bit_offset + bit_offset } + else + .{ (running_bits + 7) / 8, bit_offset }; + + // If the field happens to be byte-aligned, simplify the pointer type. + // We can only do this if the pointee's bit size matches its ABI byte size, + // so that loads and stores do not interfere with surrounding packed bits. + // + // TODO: we do not attempt this with big-endian targets yet because of nested + // structs and floats. I need to double-check the desired behavior for big endian + // targets before adding the necessary complications to this code. This will not + // cause miscompilations; it only means the field pointer uses bit masking when it + // might not be strictly necessary. + if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) { + const byte_offset = res_bit_offset / 8; + const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?)); + return .{ .byte_ptr = .{ + .offset = byte_offset, + .alignment = new_align, + } }; + } + + return .{ .bit_ptr = .{ + .host_size = res_host_size, + .bit_offset = res_bit_offset, + } }; +} + +pub const @"u1": Type = .{ .ip_index = .u1_type }; +pub const @"u8": Type = .{ .ip_index = .u8_type }; +pub const @"u16": Type = .{ .ip_index = .u16_type }; +pub const @"u29": Type = .{ .ip_index = .u29_type }; +pub const @"u32": Type = .{ .ip_index = .u32_type }; +pub const @"u64": Type = .{ .ip_index = .u64_type }; +pub const @"u128": Type = .{ .ip_index = .u128_type }; + +pub const @"i8": Type = .{ .ip_index = .i8_type }; +pub const @"i16": Type = .{ .ip_index = .i16_type }; +pub const @"i32": Type = .{ .ip_index = .i32_type }; +pub const @"i64": Type = .{ .ip_index = .i64_type }; +pub const @"i128": Type = .{ .ip_index = .i128_type }; + +pub const @"f16": Type = .{ .ip_index = .f16_type }; +pub const @"f32": Type = .{ .ip_index = .f32_type }; +pub const @"f64": Type = .{ .ip_index = .f64_type }; +pub const @"f80": Type = .{ .ip_index = .f80_type }; +pub const @"f128": Type = .{ .ip_index = .f128_type }; + +pub const @"bool": Type = .{ .ip_index = .bool_type }; +pub const @"usize": Type = .{ .ip_index = .usize_type }; +pub const @"isize": Type = .{ .ip_index = .isize_type }; +pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type }; +pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type }; +pub const @"void": Type = .{ .ip_index = .void_type }; +pub const @"type": Type = .{ .ip_index = .type_type }; +pub const @"anyerror": Type = .{ .ip_index = .anyerror_type }; +pub const @"anyopaque": Type = .{ .ip_index = 
.anyopaque_type }; +pub const @"anyframe": Type = .{ .ip_index = .anyframe_type }; +pub const @"null": Type = .{ .ip_index = .null_type }; +pub const @"undefined": Type = .{ .ip_index = .undefined_type }; +pub const @"noreturn": Type = .{ .ip_index = .noreturn_type }; + +pub const @"c_char": Type = .{ .ip_index = .c_char_type }; +pub const @"c_short": Type = .{ .ip_index = .c_short_type }; +pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type }; +pub const @"c_int": Type = .{ .ip_index = .c_int_type }; +pub const @"c_uint": Type = .{ .ip_index = .c_uint_type }; +pub const @"c_long": Type = .{ .ip_index = .c_long_type }; +pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type }; +pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type }; +pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; +pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; + +pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; +pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; +pub const single_const_pointer_to_comptime_int: Type = .{ + .ip_index = .single_const_pointer_to_comptime_int_type, +}; +pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; +pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; + +pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; + +pub fn smallestUnsignedBits(max: u64) u16 { + if (max == 0) return 0; + const base = std.math.log2(max); + const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1; + return @as(u16, @intCast(base + @intFromBool(upper < max))); +} + +/// This is only used for comptime asserts. Bump this number when you make a change +/// to packed struct layout to find out all the places in the codebase you need to edit! 
+pub const packed_struct_layout_version = 2; + +fn cTypeAlign(target: Target, c_type: Target.CType) Alignment { + return Alignment.fromByteUnits(target.c_type_alignment(c_type)); +} diff --git a/src/Value.zig b/src/Value.zig index 5719ed3689..20b24510ef 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -1,6 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; diff --git a/src/Zcu.zig b/src/Zcu.zig index 508bef971a..27e9347268 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -20,7 +20,7 @@ const Zcu = @This(); const Compilation = @import("Compilation.zig"); const Cache = std.Build.Cache; const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const Air = @import("Air.zig"); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 51b62aba14..14b9cce3a8 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -8,7 +8,7 @@ const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index 2a25dbf1be..5eeeee0fa2 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -3,7 +3,7 @@ const builtin = @import("builtin"); const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); /// Deprecated. const Module = Zcu; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index ae802c8f48..0423b63d23 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -8,7 +8,7 @@ const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index b85deaa3ce..da19760d8b 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -11,7 +11,7 @@ const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); /// Deprecated. 
const Module = Zcu; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const ErrorMsg = Module.ErrorMsg; const Target = std.Target; const assert = std.debug.assert; diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index 1a434b3b8c..f88218bc57 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -3,7 +3,7 @@ const assert = std.debug.assert; const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); /// Deprecated. const Module = Zcu; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 2bba63f616..3f01b74733 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -7,7 +7,7 @@ const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 0753b142b1..80a533d880 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -431,7 +431,7 @@ pub const RegisterList = struct { const Mir = @This(); const std = @import("std"); const builtin = @import("builtin"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const assert = std.debug.assert; diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index dd35fc41e5..042af564f6 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -2,7 +2,7 @@ const std = @import("std"); const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const InternPool = @import("../../InternPool.zig"); const Zcu = @import("../../Zcu.zig"); const assert = std.debug.assert; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index ca1cef1250..2416eb9176 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -21,7 +21,7 @@ const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const CodeGenError = codegen.CodeGenError; const Result = @import("../../codegen.zig").Result; const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 91d637c765..2ecface64e 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -13,7 +13,7 @@ const codegen = @import("../../codegen.zig"); const Zcu = @import("../../Zcu.zig"); const InternPool = @import("../../InternPool.zig"); const Decl = Zcu.Decl; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const Compilation = @import("../../Compilation.zig"); const link = @import("../../link.zig"); diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 03c68daa85..23097990ac 100644 --- a/src/arch/wasm/abi.zig +++ 
b/src/arch/wasm/abi.zig @@ -8,7 +8,7 @@ const std = @import("std"); const Target = std.Target; const assert = std.debug.assert; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); /// Defines how to pass a type as part of a function signature, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 31ed0bf514..def0edcac9 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -32,7 +32,7 @@ const Module = Zcu; const InternPool = @import("../../InternPool.zig"); const Alignment = InternPool.Alignment; const Target = std.Target; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const Instruction = @import("encoder.zig").Instruction; diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index f1117f16c1..05c0c9626c 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -537,6 +537,6 @@ const testing = std.testing; const InternPool = @import("../../InternPool.zig"); const Register = @import("bits.zig").Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const Zcu = @import("../../Zcu.zig"); diff --git a/src/codegen.zig b/src/codegen.zig index 769e8f7cd5..5e25359d44 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -20,7 +20,7 @@ const Zcu = @import("Zcu.zig"); /// Deprecated. const Module = Zcu; const Target = std.Target; -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zir = std.zig.Zir; const Alignment = InternPool.Alignment; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 6bd8bcc6fc..13d9e67519 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -9,7 +9,7 @@ const Zcu = @import("../Zcu.zig"); const Module = @import("../Package/Module.zig"); const Compilation = @import("../Compilation.zig"); const Value = @import("../Value.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const C = link.File.C; const Decl = Zcu.Decl; const trace = @import("../tracy.zig").trace; diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index e316d7a154..0a0d84f061 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -2583,6 +2583,6 @@ const assert = std.debug.assert; const CType = @This(); const Module = @import("../../Package/Module.zig"); const std = @import("std"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); const DeclIndex = @import("../../InternPool.zig").DeclIndex; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c65158a88a..b3718db5b1 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -22,7 +22,7 @@ const Package = @import("../Package.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); const Value = @import("../Value.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const x86_64_abi = @import("../arch/x86_64/abi.zig"); const wasm_c_abi = @import("../arch/wasm/abi.zig"); const aarch64_c_abi = @import("../arch/aarch64/abi.zig"); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 494ec0737e..c56a5a799e 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -9,7 +9,7 @@ const Zcu = 
@import("../Zcu.zig"); /// Deprecated. const Module = Zcu; const Decl = Module.Decl; -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); diff --git a/src/link.zig b/src/link.zig index 7f108c283f..009b38a681 100644 --- a/src/link.zig +++ b/src/link.zig @@ -18,7 +18,7 @@ const Zcu = @import("Zcu.zig"); /// Deprecated. const Module = Zcu; const InternPool = @import("InternPool.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); const LlvmObject = @import("codegen/llvm.zig").Object; const lldMain = @import("main.zig").lldMain; diff --git a/src/link/C.zig b/src/link/C.zig index 3a8d06b5ee..8372029d2d 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -14,7 +14,7 @@ const Compilation = @import("../Compilation.zig"); const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 366ba87509..55028fc8ad 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -2742,7 +2742,7 @@ const Object = @import("Coff/Object.zig"); const Relocation = @import("Coff/Relocation.zig"); const TableSection = @import("table_section.zig").TableSection; const StringTable = @import("StringTable.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const AnalUnit = InternPool.AnalUnit; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 7d576abbb4..2bb0a4c0a0 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2969,5 +2969,5 @@ const Zcu = @import("../Zcu.zig"); const Module = Zcu; const InternPool = @import("../InternPool.zig"); const StringTable = @import("StringTable.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 57fa610019..56311dd64b 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1647,7 +1647,7 @@ const Module = Zcu; const Object = @import("Object.zig"); const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index 3f0e84d6a2..c022a30664 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -459,4 +459,4 @@ const trace = @import("../../tracy.zig").trace; const Allocator = mem.Allocator; const MachO = @import("../MachO.zig"); const StringTable = @import("../StringTable.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 861ced9214..bb5ded654d 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -1587,7 +1587,7 @@ const Object = @import("Object.zig"); const Relocation = @import("Relocation.zig"); const Symbol = @import("Symbol.zig"); const 
StringTable = @import("../StringTable.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 2efe569d98..96fbaf42c7 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -15,7 +15,7 @@ const File = link.File; const build_options = @import("build_options"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const AnalUnit = InternPool.AnalUnit; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 3befedad89..d14061fe78 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -33,7 +33,7 @@ const Zcu = @import("../Zcu.zig"); const Module = Zcu; const Object = @import("Wasm/Object.zig"); const Symbol = @import("Wasm/Symbol.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const ZigObject = @import("Wasm/ZigObject.zig"); diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index ca950e5cef..24fc66367a 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -1252,7 +1252,7 @@ const Zcu = @import("../../Zcu.zig"); const Module = Zcu; const StringTable = @import("../StringTable.zig"); const Symbol = @import("Symbol.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const Wasm = @import("../Wasm.zig"); const AnalUnit = InternPool.AnalUnit; diff --git a/src/mutable_value.zig b/src/mutable_value.zig index 77c0827691..1806e6ba19 100644 --- a/src/mutable_value.zig +++ b/src/mutable_value.zig @@ -3,7 +3,7 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Zcu = @import("Zcu.zig"); const InternPool = @import("InternPool.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); /// We use a tagged union here because while it wastes a few bytes for some tags, having a fixed diff --git a/src/print_air.zig b/src/print_air.zig index 2dbaf3069f..85fbe87ec9 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -4,7 +4,7 @@ const fmtIntSizeBin = std.fmt.fmtIntSizeBin; const Zcu = @import("Zcu.zig"); const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); const InternPool = @import("InternPool.zig"); diff --git a/src/print_value.zig b/src/print_value.zig index 7f75b05606..d2952c3d8e 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -2,7 +2,7 @@ //! It is a thin wrapper around a `Value` which also, redundantly, stores its `Type`. const std = @import("std"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. 
diff --git a/src/register_manager.zig b/src/register_manager.zig index e1bc4d52fa..fb9afbbc01 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -5,7 +5,7 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Air = @import("Air.zig"); const StaticBitSet = std.bit_set.StaticBitSet; -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. const Module = Zcu; diff --git a/src/target.zig b/src/target.zig index 08ccfbaaca..a253c1fa0b 100644 --- a/src/target.zig +++ b/src/target.zig @@ -1,5 +1,5 @@ const std = @import("std"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const AddressSpace = std.builtin.AddressSpace; const Alignment = @import("InternPool.zig").Alignment; const Feature = @import("Zcu.zig").Feature; diff --git a/src/type.zig b/src/type.zig deleted file mode 100644 index df93822273..0000000000 --- a/src/type.zig +++ /dev/null @@ -1,3617 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); -const Value = @import("Value.zig"); -const assert = std.debug.assert; -const Target = std.Target; -const Zcu = @import("Zcu.zig"); -/// Deprecated. -const Module = Zcu; -const log = std.log.scoped(.Type); -const target_util = @import("target.zig"); -const Sema = @import("Sema.zig"); -const InternPool = @import("InternPool.zig"); -const Alignment = InternPool.Alignment; -const Zir = std.zig.Zir; - -/// Both types and values are canonically represented by a single 32-bit integer -/// which is an index into an `InternPool` data structure. -/// This struct abstracts around this storage by providing methods only -/// applicable to types rather than values in general. -pub const Type = struct { - ip_index: InternPool.Index, - - pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { - return ty.zigTypeTagOrPoison(mod) catch unreachable; - } - - pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { - return mod.intern_pool.zigTypeTagOrPoison(ty.toIntern()); - } - - pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { - return switch (self.zigTypeTag(mod)) { - .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod), - .Optional => { - return self.optionalChild(mod).baseZigTypeTag(mod); - }, - else => |t| t, - }; - } - - pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { - return switch (ty.zigTypeTag(mod)) { - .Int, - .Float, - .ComptimeFloat, - .ComptimeInt, - => true, - - .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), - - .Bool, - .Type, - .Void, - .ErrorSet, - .Fn, - .Opaque, - .AnyFrame, - .Enum, - .EnumLiteral, - => is_equality_cmp, - - .NoReturn, - .Array, - .Struct, - .Undefined, - .Null, - .ErrorUnion, - .Union, - .Frame, - => false, - - .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), - .Optional => { - if (!is_equality_cmp) return false; - return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); - }, - }; - } - - /// If it is a function pointer, returns the function type. Otherwise returns null. - pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { - if (ty.zigTypeTag(mod) != .Pointer) return null; - const elem_ty = ty.childType(mod); - if (elem_ty.zigTypeTag(mod) != .Fn) return null; - return elem_ty; - } - - /// Asserts the type is a pointer. 
- pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { - return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const; - } - - pub const ArrayInfo = struct { - elem_type: Type, - sentinel: ?Value = null, - len: u64, - }; - - pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { - return .{ - .len = self.arrayLen(mod), - .sentinel = self.sentinel(mod), - .elem_type = self.childType(mod), - }; - } - - pub fn ptrInfo(ty: Type, mod: *const Module) InternPool.Key.PtrType { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |p| p, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |p| p, - else => unreachable, - }, - else => unreachable, - }; - } - - pub fn eql(a: Type, b: Type, mod: *const Module) bool { - _ = mod; // TODO: remove this parameter - // The InternPool data structure hashes based on Key to make interned objects - // unique. An Index can be treated simply as u32 value for the - // purpose of Type/Value hashing and equality. - return a.toIntern() == b.toIntern(); - } - - pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - _ = ty; - _ = unused_fmt_string; - _ = options; - _ = writer; - @compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()"); - } - - pub const Formatter = std.fmt.Formatter(format2); - - pub fn fmt(ty: Type, module: *Module) Formatter { - return .{ .data = .{ - .ty = ty, - .module = module, - } }; - } - - const FormatContext = struct { - ty: Type, - module: *Module, - }; - - fn format2( - ctx: FormatContext, - comptime unused_format_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) !void { - comptime assert(unused_format_string.len == 0); - _ = options; - return print(ctx.ty, writer, ctx.module); - } - - pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) { - return .{ .data = ty }; - } - - /// This is a debug function. In order to print types in a meaningful way - /// we also need access to the module. - pub fn dump( - start_type: Type, - comptime unused_format_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) @TypeOf(writer).Error!void { - _ = options; - comptime assert(unused_format_string.len == 0); - return writer.print("{any}", .{start_type.ip_index}); - } - - /// Prints a name suitable for `@typeName`. - /// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels. 
- pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - const sign_char: u8 = switch (int_type.signedness) { - .signed => 'i', - .unsigned => 'u', - }; - return writer.print("{c}{d}", .{ sign_char, int_type.bits }); - }, - .ptr_type => { - const info = ty.ptrInfo(mod); - - if (info.sentinel != .none) switch (info.flags.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), - .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), - } else switch (info.flags.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (info.flags.alignment != .none or - info.packed_offset.host_size != 0 or - info.flags.vector_index != .none) - { - const alignment = if (info.flags.alignment != .none) - info.flags.alignment - else - Type.fromInterned(info.child).abiAlignment(mod); - try writer.print("align({d}", .{alignment.toByteUnits() orelse 0}); - - if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) { - try writer.print(":{d}:{d}", .{ - info.packed_offset.bit_offset, info.packed_offset.host_size, - }); - } - if (info.flags.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (info.flags.vector_index != .none) { - try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)}); - } - try writer.writeAll(") "); - } - if (info.flags.address_space != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)}); - } - if (info.flags.is_const) try writer.writeAll("const "); - if (info.flags.is_volatile) try writer.writeAll("volatile "); - if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero "); - - try print(Type.fromInterned(info.child), writer, mod); - return; - }, - .array_type => |array_type| { - if (array_type.sentinel == .none) { - try writer.print("[{d}]", .{array_type.len}); - try print(Type.fromInterned(array_type.child), writer, mod); - } else { - try writer.print("[{d}:{}]", .{ - array_type.len, - Value.fromInterned(array_type.sentinel).fmtValue(mod, null), - }); - try print(Type.fromInterned(array_type.child), writer, mod); - } - return; - }, - .vector_type => |vector_type| { - try writer.print("@Vector({d}, ", .{vector_type.len}); - try print(Type.fromInterned(vector_type.child), writer, mod); - try writer.writeAll(")"); - return; - }, - .opt_type => |child| { - try writer.writeByte('?'); - return print(Type.fromInterned(child), writer, mod); - }, - .error_union_type => |error_union_type| { - try print(Type.fromInterned(error_union_type.error_set_type), writer, mod); - try writer.writeByte('!'); - if (error_union_type.payload_type == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(Type.fromInterned(error_union_type.payload_type), writer, mod); - } - return; - }, - .inferred_error_set_type => |func_index| { - try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.funcOwnerDeclPtr(func_index); - try owner_decl.renderFullyQualifiedName(mod, writer); - try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); - }, - .error_set_type => |error_set_type| { - const names = error_set_type.names; - try writer.writeAll("error{"); - for (names.get(ip), 0..) 
|name, i| { - if (i != 0) try writer.writeByte(','); - try writer.print("{}", .{name.fmt(ip)}); - } - try writer.writeAll("}"); - }, - .simple_type => |s| switch (s) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .comptime_int, - .comptime_float, - .noreturn, - .adhoc_inferred_error_set, - => return writer.writeAll(@tagName(s)), - - .null, - .undefined, - => try writer.print("@TypeOf({s})", .{@tagName(s)}), - - .enum_literal => try writer.print("@TypeOf(.{s})", .{@tagName(s)}), - .atomic_order => try writer.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op => try writer.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention => try writer.writeAll("std.builtin.CallingConvention"), - .address_space => try writer.writeAll("std.builtin.AddressSpace"), - .float_mode => try writer.writeAll("std.builtin.FloatMode"), - .reduce_op => try writer.writeAll("std.builtin.ReduceOp"), - .call_modifier => try writer.writeAll("std.builtin.CallModifier"), - .prefetch_options => try writer.writeAll("std.builtin.PrefetchOptions"), - .export_options => try writer.writeAll("std.builtin.ExportOptions"), - .extern_options => try writer.writeAll("std.builtin.ExternOptions"), - .type_info => try writer.writeAll("std.builtin.Type"), - - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.decl.unwrap()) |decl_index| { - const decl = mod.declPtr(decl_index); - try decl.renderFullyQualifiedName(mod, writer); - } else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| { - const namespace = mod.namespacePtr(namespace_index); - try namespace.renderFullyQualifiedName(mod, .empty, writer); - } else { - try writer.writeAll("@TypeOf(.{})"); - } - }, - .anon_struct_type => |anon_struct| { - if (anon_struct.types.len == 0) { - return writer.writeAll("@TypeOf(.{})"); - } - try writer.writeAll("struct{"); - for (anon_struct.types.get(ip), anon_struct.values.get(ip), 0..) |field_ty, val, i| { - if (i != 0) try writer.writeAll(", "); - if (val != .none) { - try writer.writeAll("comptime "); - } - if (anon_struct.names.len != 0) { - try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)}); - } - - try print(Type.fromInterned(field_ty), writer, mod); - - if (val != .none) { - try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)}); - } - } - try writer.writeAll("}"); - }, - - .union_type => { - const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .opaque_type => { - const decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_type => { - const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .func_type => |fn_info| { - if (fn_info.is_noinline) { - try writer.writeAll("noinline "); - } - try writer.writeAll("fn ("); - const param_types = fn_info.param_types.get(&mod.intern_pool); - for (param_types, 0..) 
|param_ty, i| { - if (i != 0) try writer.writeAll(", "); - if (std.math.cast(u5, i)) |index| { - if (fn_info.paramIsComptime(index)) { - try writer.writeAll("comptime "); - } - if (fn_info.paramIsNoalias(index)) { - try writer.writeAll("noalias "); - } - } - if (param_ty == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(Type.fromInterned(param_ty), writer, mod); - } - } - if (fn_info.is_var_args) { - if (param_types.len != 0) { - try writer.writeAll(", "); - } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (fn_info.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(fn_info.cc)); - try writer.writeAll(") "); - } - if (fn_info.return_type == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(Type.fromInterned(fn_info.return_type), writer, mod); - } - }, - .anyframe_type => |child| { - if (child == .none) return writer.writeAll("anyframe"); - try writer.writeAll("anyframe->"); - return print(Type.fromInterned(child), writer, mod); - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - } - } - - pub fn fromInterned(i: InternPool.Index) Type { - assert(i != .none); - return .{ .ip_index = i }; - } - - pub fn toIntern(ty: Type) InternPool.Index { - assert(ty.ip_index != .none); - return ty.ip_index; - } - - pub fn toValue(self: Type) Value { - return Value.fromInterned(self.toIntern()); - } - - const RuntimeBitsError = Module.CompileError || error{NeedLazy}; - - /// true if and only if the type takes up space in memory at runtime. - /// There are two reasons a type will return false: - /// * the type is a comptime-only type. For example, the type `type` itself. - /// - note, however, that a struct can have mixed fields and only the non-comptime-only - /// fields will count towards the ABI size. For example, `struct {T: type, x: i32}` - /// hasRuntimeBits()=true and abiSize()=4 - /// * the type has only one possible value, making its ABI size 0. - /// - an enum with an explicit tag type has the ABI size of the integer tag type, - /// making it one-possible-value only if the integer tag type has 0 bits. - /// When `ignore_comptime_only` is true, then types that are comptime-only - /// may return false positives. - pub fn hasRuntimeBitsAdvanced( - ty: Type, - mod: *Module, - ignore_comptime_only: bool, - strat: AbiAlignmentAdvancedStrat, - ) RuntimeBitsError!bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - // False because it is a comptime-only type. - .empty_struct_type => false, - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| int_type.bits != 0, - .ptr_type => { - // Pointers to zero-bit types still have a runtime address; however, pointers - // to comptime-only types do not, with the exception of function pointers. 
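// A few illustrative cases: `*u0` has runtime bits (the address still exists
// at runtime), `*comptime_int` does not, and `*const fn () void` does,
// function pointers being the exception noted above.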
- if (ignore_comptime_only) return true; - return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(ty)), - .eager => !comptimeOnly(ty, mod), - .lazy => error.NeedLazy, - }; - }, - .anyframe_type => true, - .array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and - try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .vector_type => |vector_type| return vector_type.len > 0 and - try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .opt_type => |child| { - const child_ty = Type.fromInterned(child); - if (child_ty.isNoReturn(mod)) { - // Then the optional is comptime-known to be null. - return false; - } - if (ignore_comptime_only) return true; - return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(child_ty)), - .eager => !comptimeOnly(child_ty, mod), - .lazy => error.NeedLazy, - }; - }, - .error_union_type, - .error_set_type, - .inferred_error_set_type, - => true, - - // These are function *bodies*, not pointers. - // They return false here because they are comptime-only types. - // Special exceptions have to be made when emitting functions due to - // this returning false. - .func_type => false, - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .bool, - .anyerror, - .adhoc_inferred_error_set, - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - => true, - - // These are false because they are comptime-only types. - .void, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .type_info, - => false, - - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(struct_type.haveFieldTypes(ip)), - .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy, - } - for (0..struct_type.field_types.len) |i| { - if (struct_type.comptime_bits.getBit(ip, i)) continue; - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - .anon_struct_type => |tuple| { - for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { - if (val != .none) continue; // comptime field - if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; - } - return false; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).runtime_tag) { - .none => { - if (union_type.flagsPtr(ip).status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. 
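// Sketch of the situation this guards against: resolving a self-referential
// type such as `const Node = union { next: ?*Node };` queries `Node`'s
// runtime bits while its own field types are still work-in-progress;
// assuming `true` lets that resolution terminate.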
- union_type.flagsPtr(ip).assumed_runtime_bits = true;
- return true;
- }
- },
- .safety, .tagged => {
- const tag_ty = union_type.tagTypePtr(ip).*;
- // tag_ty will be `none` if this union's tag type is not resolved yet,
- // in which case we want control flow to continue down below.
- if (tag_ty != .none and
- try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
- {
- return true;
- }
- },
- }
- switch (strat) {
- .sema => |sema| _ = try sema.resolveTypeFields(ty),
- .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()),
- .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
- return error.NeedLazy,
- }
- for (0..union_type.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
- if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
- return true;
- } else {
- return false;
- }
- },
-
- .opaque_type => true,
- .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
-
- // values, not types
- .undef,
- .simple_value,
- .variable,
- .extern_func,
- .func,
- .int,
- .err,
- .error_union,
- .enum_literal,
- .enum_tag,
- .empty_enum_value,
- .float,
- .ptr,
- .slice,
- .opt,
- .aggregate,
- .un,
- // memoization, not types
- .memoized_call,
- => unreachable,
- },
- };
- }
-
- /// true if and only if the type has a well-defined memory layout.
- /// readFrom/writeToMemory are supported only for types with a well-
- /// defined memory layout.
- pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
- const ip = &mod.intern_pool;
- return switch (ip.indexToKey(ty.toIntern())) {
- .int_type,
- .vector_type,
- => true,
-
- .error_union_type,
- .error_set_type,
- .inferred_error_set_type,
- .anon_struct_type,
- .opaque_type,
- .anyframe_type,
- // These are function bodies, not function pointers.
- .func_type,
- => false,
-
- .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(mod),
- .opt_type => ty.isPtrLikeOptional(mod),
- .ptr_type => |ptr_type| ptr_type.flags.size != .Slice,
-
- .simple_type => |t| switch (t) {
- .f16,
- .f32,
- .f64,
- .f80,
- .f128,
- .usize,
- .isize,
- .c_char,
- .c_short,
- .c_ushort,
- .c_int,
- .c_uint,
- .c_long,
- .c_ulong,
- .c_longlong,
- .c_ulonglong,
- .c_longdouble,
- .bool,
- .void,
- => true,
-
- .anyerror,
- .adhoc_inferred_error_set,
- .anyopaque,
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .address_space,
- .float_mode,
- .reduce_op,
- .call_modifier,
- .prefetch_options,
- .export_options,
- .extern_options,
- .type,
- .comptime_int,
- .comptime_float,
- .noreturn,
- .null,
- .undefined,
- .enum_literal,
- .type_info,
- .generic_poison,
- => false,
- },
- .struct_type => {
- const struct_type = ip.loadStructType(ty.toIntern());
- // Structs with no fields have a well-defined layout of no bits.
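// For example: `extern struct { x: u32 }` and `packed struct(u8) { b: u8 }`
// have well-defined layouts, a plain auto-layout `struct { x: u32 }` does
// not, and the empty `struct {}` trivially does.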
- return struct_type.layout != .auto or struct_type.field_types.len == 0; - }, - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - return switch (union_type.flagsPtr(ip).runtime_tag) { - .none, .safety => union_type.flagsPtr(ip).layout != .auto, - .tagged => false, - }; - }, - .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) { - .auto => false, - .explicit, .nonexhaustive => true, - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }; - } - - pub fn hasRuntimeBits(ty: Type, mod: *Module) bool { - return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; - } - - pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { - return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; - } - - pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool { - return ty.fnHasRuntimeBitsAdvanced(mod, null) catch unreachable; - } - - /// Determines whether a function type has runtime bits, i.e. whether a - /// function with this type can exist at runtime. - /// Asserts that `ty` is a function type. - /// If `opt_sema` is not provided, asserts that the return type is sufficiently resolved. - pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { - const fn_info = mod.typeToFunc(ty).?; - if (fn_info.is_generic) return false; - if (fn_info.is_var_args) return true; - if (fn_info.cc == .Inline) return false; - return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema); - } - - pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { - switch (ty.zigTypeTag(mod)) { - .Fn => return ty.fnHasRuntimeBits(mod), - else => return ty.hasRuntimeBits(mod), - } - } - - /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. - pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Fn => true, - else => return ty.hasRuntimeBitsIgnoreComptime(mod), - }; - } - - pub fn isNoReturn(ty: Type, mod: *Module) bool { - return mod.intern_pool.isNoReturn(ty.toIntern()); - } - - /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit. - pub fn ptrAlignment(ty: Type, mod: *Module) Alignment { - return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; - } - - pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| { - if (ptr_type.flags.alignment != .none) - return ptr_type.flags.alignment; - - if (opt_sema) |sema| { - const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } - - return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - }, - .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema), - else => unreachable, - }; - } - - pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.address_space, - .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space, - else => unreachable, - }; - } - - /// Never returns `none`. 
Asserts that all necessary type resolution is already done. - pub fn abiAlignment(ty: Type, mod: *Module) Alignment { - return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } - - /// May capture a reference to `ty`. - /// Returned value has type `comptime_int`. - pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { - switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { - .val => |val| return val, - .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0), - } - } - - pub const AbiAlignmentAdvanced = union(enum) { - scalar: Alignment, - val: Value, - }; - - pub const AbiAlignmentAdvancedStrat = union(enum) { - eager, - lazy, - sema: *Sema, - }; - - /// If you pass `eager` you will get back `scalar` and assert the type is resolved. - /// In this case there will be no error, guaranteed. - /// If you pass `lazy` you may get back `scalar` or `val`. - /// If `val` is returned, a reference to `ty` has been captured. - /// If you pass `sema` you will get back `scalar` and resolve the type if - /// necessary, possibly returning a CompileError. - pub fn abiAlignmentAdvanced( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiAlignmentAdvanced { - const target = mod.getTarget(); - const use_llvm = mod.comp.config.use_llvm; - const ip = &mod.intern_pool; - - const opt_sema = switch (strat) { - .sema => |sema| sema, - else => null, - }; - - switch (ty.toIntern()) { - .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; - return .{ .scalar = intAbiAlignment(int_type.bits, target, use_llvm) }; - }, - .ptr_type, .anyframe_type => { - return .{ .scalar = ptrAbiAlignment(target) }; - }, - .array_type => |array_type| { - return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat); - }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return .{ .scalar = .@"1" }; - switch (mod.comp.getZigBackend()) { - else => { - const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema)); - if (elem_bits == 0) return .{ .scalar = .@"1" }; - const bytes = ((elem_bits * vector_type.len) + 7) / 8; - const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); - return .{ .scalar = Alignment.fromByteUnits(alignment) }; - }, - .stage2_c => { - return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat); - }, - .stage2_x86_64 => { - if (vector_type.child == .bool_type) { - if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; - if (vector_type.len > 128 and std.Target.x86.featureSetHas(target.cpu.features, .avx2)) return .{ .scalar = .@"32" }; - if (vector_type.len > 64) return .{ .scalar = .@"16" }; - const bytes = std.math.divCeil(u32, vector_type.len, 8) catch unreachable; - const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); - return .{ .scalar = Alignment.fromByteUnits(alignment) }; - } - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); - if (elem_bytes == 0) return .{ .scalar = .@"1" }; - const bytes = elem_bytes * vector_type.len; - if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; - if (bytes > 16 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{ .scalar = .@"32" }; 
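// Neither wider-vector case above applied, so fall back to the baseline
// 16-byte (XMM) vector alignment.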
- return .{ .scalar = .@"16" }; - }, - } - }, - - .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), - .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)), - - .error_set_type, .inferred_error_set_type => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; - return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; - }, - - // represents machine code; not a pointer - .func_type => return .{ .scalar = target_util.defaultFunctionAlignment(target) }, - - .simple_type => |t| switch (t) { - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .anyopaque, - => return .{ .scalar = .@"1" }, - - .usize, - .isize, - => return .{ .scalar = intAbiAlignment(target.ptrBitWidth(), target, use_llvm) }, - - .export_options, - .extern_options, - .type_info, - => return .{ .scalar = ptrAbiAlignment(target) }, - - .c_char => return .{ .scalar = cTypeAlign(target, .char) }, - .c_short => return .{ .scalar = cTypeAlign(target, .short) }, - .c_ushort => return .{ .scalar = cTypeAlign(target, .ushort) }, - .c_int => return .{ .scalar = cTypeAlign(target, .int) }, - .c_uint => return .{ .scalar = cTypeAlign(target, .uint) }, - .c_long => return .{ .scalar = cTypeAlign(target, .long) }, - .c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) }, - .c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) }, - .c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) }, - .c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) }, - - .f16 => return .{ .scalar = .@"2" }, - .f32 => return .{ .scalar = cTypeAlign(target, .float) }, - .f64 => switch (target.c_type_bit_size(.double)) { - 64 => return .{ .scalar = cTypeAlign(target, .double) }, - else => return .{ .scalar = .@"8" }, - }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return .{ .scalar = cTypeAlign(target, .longdouble) }, - else => { - const u80_ty: Type = .{ .ip_index = .u80_type }; - return .{ .scalar = abiAlignment(u80_ty, mod) }; - }, - }, - .f128 => switch (target.c_type_bit_size(.longdouble)) { - 128 => return .{ .scalar = cTypeAlign(target, .longdouble) }, - else => return .{ .scalar = .@"16" }, - }, - - .anyerror, .adhoc_inferred_error_set => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; - return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; - }, - - .void, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - => return .{ .scalar = .@"1" }, - - .noreturn => unreachable, - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.layout == .@"packed") { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))), - }, - .eager => {}, - } - return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; - } - - const flags = struct_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - - return switch (strat) { - .eager => unreachable, // struct alignment not resolved - .sema => |sema| .{ - .scalar = try 
sema.resolveStructAlignment(ty.toIntern(), struct_type), - }, - .lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }; - }, - .anon_struct_type => |tuple| { - var big_align: Alignment = .@"1"; - for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { - if (val != .none) continue; // comptime field - switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) { - .scalar => |field_align| big_align = big_align.max(field_align), - .val => switch (strat) { - .eager => unreachable, // field type alignment not resolved - .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }, - } - } - return .{ .scalar = big_align }; - }, - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - const flags = union_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - - if (!union_type.haveLayout(ip)) switch (strat) { - .eager => unreachable, // union layout not resolved - .sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) }, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }; - - return .{ .scalar = union_type.flagsPtr(ip).alignment }; - }, - .opaque_type => return .{ .scalar = .@"1" }, - .enum_type => return .{ - .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod), - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - } - } - - fn abiAlignmentAdvancedErrorUnion( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - payload_ty: Type, - ) Module.CompileError!AbiAlignmentAdvanced { - // This code needs to be kept in sync with the equivalent switch prong - // in abiSizeAdvanced. 
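// Worked example (assuming the default 16-bit error set on x86_64): for
// `anyerror!u64` this yields max(align(anyerror), align(u64)) = max(2, 8) = 8.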
- const code_align = abiAlignment(Type.anyerror, mod); - switch (strat) { - .eager, .sema => { - if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) { - return .{ .scalar = code_align }; - } - return .{ .scalar = code_align.max( - (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, - ) }; - }, - .lazy => { - switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) }, - .val => {}, - } - return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }; - }, - } - } - - fn abiAlignmentAdvancedOptional( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiAlignmentAdvanced { - const target = mod.getTarget(); - const child_type = ty.optionalChild(mod); - - switch (child_type.zigTypeTag(mod)) { - .Pointer => return .{ .scalar = ptrAbiAlignment(target) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), - .NoReturn => return .{ .scalar = .@"1" }, - else => {}, - } - - switch (strat) { - .eager, .sema => { - if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) { - return .{ .scalar = .@"1" }; - } - return child_type.abiAlignmentAdvanced(mod, strat); - }, - .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| return .{ .scalar = x.max(.@"1") }, - .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }, - } - } - - /// May capture a reference to `ty`. - pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { - switch (try ty.abiSizeAdvanced(mod, .lazy)) { - .val => |val| return val, - .scalar => |x| return mod.intValue(Type.comptime_int, x), - } - } - - /// Asserts the type has the ABI size already resolved. - /// Types that return false for hasRuntimeBits() return 0. - pub fn abiSize(ty: Type, mod: *Module) u64 { - return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; - } - - const AbiSizeAdvanced = union(enum) { - scalar: u64, - val: Value, - }; - - /// If you pass `eager` you will get back `scalar` and assert the type is resolved. - /// In this case there will be no error, guaranteed. - /// If you pass `lazy` you may get back `scalar` or `val`. - /// If `val` is returned, a reference to `ty` has been captured. - /// If you pass `sema` you will get back `scalar` and resolve the type if - /// necessary, possibly returning a CompileError. 
- pub fn abiSizeAdvanced( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiSizeAdvanced { - const target = mod.getTarget(); - const use_llvm = mod.comp.config.use_llvm; - const ip = &mod.intern_pool; - - switch (ty.toIntern()) { - .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, - - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target, use_llvm) }; - }, - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, - else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - }, - .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .array_type => |array_type| { - const len = array_type.lenIncludingSentinel(); - if (len == 0) return .{ .scalar = 0 }; - switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| return .{ .scalar = len * elem_size }, - .val => switch (strat) { - .sema, .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }, - } - }, - .vector_type => |vector_type| { - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }; - const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| x, - .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }; - const total_bytes = switch (mod.comp.getZigBackend()) { - else => total_bytes: { - const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema); - const total_bits = elem_bits * vector_type.len; - break :total_bytes (total_bits + 7) / 8; - }, - .stage2_c => total_bytes: { - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); - break :total_bytes elem_bytes * vector_type.len; - }, - .stage2_x86_64 => total_bytes: { - if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable; - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); - break :total_bytes elem_bytes * vector_type.len; - }, - }; - return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) }; - }, - - .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), - - .error_set_type, .inferred_error_set_type => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; - }, - - .error_union_type => |error_union_type| { - const payload_ty = Type.fromInterned(error_union_type.payload_type); - // This code needs to be kept in sync with the equivalent switch prong - // in abiAlignmentAdvanced. 
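// Worked example (assuming a 16-bit error set on x86_64): for `anyerror!u64`,
// the payload alignment (8) exceeds the error-code alignment (2), so the
// payload is laid out first: 8 (payload) + 2 (code), rounded up to the
// payload alignment, gives an ABI size of 16 bytes.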
- const code_size = abiSize(Type.anyerror, mod); - if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) { - // Same as anyerror. - return AbiSizeAdvanced{ .scalar = code_size }; - } - const code_align = abiAlignment(Type.anyerror, mod); - const payload_align = abiAlignment(payload_ty, mod); - const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }, - }; - - var size: u64 = 0; - if (code_align.compare(.gt, payload_align)) { - size += code_size; - size = payload_align.forward(size); - size += payload_size; - size = code_align.forward(size); - } else { - size += payload_size; - size = code_align.forward(size); - size += code_size; - size = payload_align.forward(size); - } - return AbiSizeAdvanced{ .scalar = size }; - }, - .func_type => unreachable, // represents machine code; not a pointer - .simple_type => |t| switch (t) { - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - => return AbiSizeAdvanced{ .scalar = 1 }, - - .f16 => return AbiSizeAdvanced{ .scalar = 2 }, - .f32 => return AbiSizeAdvanced{ .scalar = 4 }, - .f64 => return AbiSizeAdvanced{ .scalar = 8 }, - .f128 => return AbiSizeAdvanced{ .scalar = 16 }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - else => { - const u80_ty: Type = .{ .ip_index = .u80_type }; - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; - }, - }, - - .usize, - .isize, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, - .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, - .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, - .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, - .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, - .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, - .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, - .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, - .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - - .anyopaque, - .void, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - => return AbiSizeAdvanced{ .scalar = 0 }, - - .anyerror, .adhoc_inferred_error_set => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; - }, - - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - - 
.type_info => unreachable, - .noreturn => unreachable, - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => switch (struct_type.layout) { - .@"packed" => { - if (struct_type.backingIntType(ip).* == .none) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))), - }; - }, - .auto, .@"extern" => { - if (!struct_type.haveLayout(ip)) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))), - }; - }, - }, - .eager => {}, - } - switch (struct_type.layout) { - .@"packed" => return .{ - .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod), - }, - .auto, .@"extern" => { - assert(struct_type.haveLayout(ip)); - return .{ .scalar = struct_type.size(ip).* }; - }, - } - }, - .anon_struct_type => |tuple| { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy, .eager => {}, - } - const field_count = tuple.types.len; - if (field_count == 0) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))), - }, - .eager => {}, - } - - assert(union_type.haveLayout(ip)); - return .{ .scalar = union_type.size(ip).* }; - }, - .opaque_type => unreachable, // no size available - .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - } - } - - fn abiSizeAdvancedOptional( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiSizeAdvanced { - const child_ty = ty.optionalChild(mod); - - if (child_ty.isNoReturn(mod)) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - - if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) return AbiSizeAdvanced{ .scalar = 1 }; - - if (ty.optionalReprIsPayload(mod)) { - return abiSizeAdvanced(child_ty, mod, strat); - } - - const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }, - }; - - // Optional types are represented as a struct with the child type as the first - // field and a boolean as the second. 
Since the child type's abi alignment is - // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal - // to the child type's ABI alignment. - return AbiSizeAdvanced{ - .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size, - }; - } - - pub fn ptrAbiAlignment(target: Target) Alignment { - return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8)); - } - - pub fn intAbiSize(bits: u16, target: Target, use_llvm: bool) u64 { - return intAbiAlignment(bits, target, use_llvm).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8))); - } - - pub fn intAbiAlignment(bits: u16, target: Target, use_llvm: bool) Alignment { - return switch (target.cpu.arch) { - .x86 => switch (bits) { - 0 => .none, - 1...8 => .@"1", - 9...16 => .@"2", - 17...64 => .@"4", - else => .@"16", - }, - .x86_64 => switch (bits) { - 0 => .none, - 1...8 => .@"1", - 9...16 => .@"2", - 17...32 => .@"4", - 33...64 => .@"8", - else => switch (target_util.zigBackend(target, use_llvm)) { - .stage2_x86_64 => .@"8", - else => .@"16", - }, - }, - else => return Alignment.fromByteUnits(@min( - std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))), - maxIntAlignment(target, use_llvm), - )), - }; - } - - pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 { - return switch (target.cpu.arch) { - .avr => 1, - .msp430 => 2, - .xcore => 4, - - .arm, - .armeb, - .thumb, - .thumbeb, - .hexagon, - .mips, - .mipsel, - .powerpc, - .powerpcle, - .r600, - .amdgcn, - .riscv32, - .sparc, - .sparcel, - .s390x, - .lanai, - .wasm32, - .wasm64, - => 8, - - // For these, LLVMABIAlignmentOfType(i128) reports 8. Note that 16 - // is a relevant number in three cases: - // 1. Different machine code instruction when loading into SIMD register. - // 2. The C ABI wants 16 for extern structs. - // 3. 16-byte cmpxchg needs 16-byte alignment. - // Same logic for powerpc64, mips64, sparc64. - .powerpc64, - .powerpc64le, - .mips64, - .mips64el, - .sparc64, - => switch (target.ofmt) { - .c => 16, - else => 8, - }, - - .x86_64 => switch (target_util.zigBackend(target, use_llvm)) { - .stage2_x86_64 => 8, - else => 16, - }, - - // Even LLVMABIAlignmentOfType(i128) agrees on these targets. - .x86, - .aarch64, - .aarch64_be, - .aarch64_32, - .riscv64, - .bpfel, - .bpfeb, - .nvptx, - .nvptx64, - => 16, - - // Below this comment are unverified but based on the fact that C requires - // int128_t to be 16 bytes aligned, it's a safe default. - .spu_2, - .csky, - .arc, - .m68k, - .tce, - .tcele, - .le32, - .amdil, - .hsail, - .spir, - .kalimba, - .renderscript32, - .spirv, - .spirv32, - .shave, - .le64, - .amdil64, - .hsail64, - .spir64, - .renderscript64, - .ve, - .spirv64, - .dxil, - .loongarch32, - .loongarch64, - .xtensa, - => 16, - }; - } - - pub fn bitSize(ty: Type, mod: *Module) u64 { - return bitSizeAdvanced(ty, mod, null) catch unreachable; - } - - /// If you pass `opt_sema`, any recursive type resolutions will happen if - /// necessary, possibly returning a CompileError. Passing `null` instead asserts - /// the type is fully resolved, and there will be no error, guaranteed. 
- pub fn bitSizeAdvanced( - ty: Type, - mod: *Module, - opt_sema: ?*Sema, - ) Module.CompileError!u64 { - const target = mod.getTarget(); - const ip = &mod.intern_pool; - - const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - - switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| return int_type.bits, - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth(), - }, - .anyframe_type => return target.ptrBitWidth(), - - .array_type => |array_type| { - const len = array_type.lenIncludingSentinel(); - if (len == 0) return 0; - const elem_ty = Type.fromInterned(array_type.child); - const elem_size = @max( - (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0, - (try elem_ty.abiSizeAdvanced(mod, strat)).scalar, - ); - if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); - return (len - 1) * 8 * elem_size + elem_bit_size; - }, - .vector_type => |vector_type| { - const child_ty = Type.fromInterned(vector_type.child); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); - return elem_bit_size * vector_type.len; - }, - .opt_type => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, - - .error_set_type, .inferred_error_set_type => return mod.errorSetBits(), - - .error_union_type => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, - .func_type => unreachable, // represents machine code; not a pointer - .simple_type => |t| switch (t) { - .f16 => return 16, - .f32 => return 32, - .f64 => return 64, - .f80 => return 80, - .f128 => return 128, - - .usize, - .isize, - => return target.ptrBitWidth(), - - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), - - .bool => return 1, - .void => return 0, - - .anyerror, - .adhoc_inferred_error_set, - => return mod.errorSetBits(), - - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .comptime_float => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, - .generic_poison => unreachable, - - .atomic_order => unreachable, - .atomic_rmw_op => unreachable, - .calling_convention => unreachable, - .address_space => unreachable, - .float_mode => unreachable, - .reduce_op => unreachable, - .call_modifier => unreachable, - .prefetch_options => unreachable, - .export_options => unreachable, - .extern_options => unreachable, - .type_info => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - const is_packed = struct_type.layout == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); - } - if (is_packed) { - return try 
Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema); - } - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - }, - - .anon_struct_type => { - if (opt_sema) |sema| try sema.resolveTypeFields(ty); - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - const is_packed = ty.containerLayout(mod) == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); - } - if (!is_packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - assert(union_type.flagsPtr(ip).status.haveFieldTypes()); - - var size: u64 = 0; - for (0..union_type.field_types.len) |field_index| { - const field_ty = union_type.field_types.get(ip)[field_index]; - size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema)); - } - - return size; - }, - .opaque_type => unreachable, - .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema), - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - } - } - - /// Returns true if the type's layout is already resolved and it is safe - /// to use `abiSize`, `abiAlignment` and `bitSize` on it. - pub fn layoutIsResolved(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip), - .union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip), - .array_type => |array_type| { - if (array_type.lenIncludingSentinel() == 0) return true; - return Type.fromInterned(array_type.child).layoutIsResolved(mod); - }, - .opt_type => |child| Type.fromInterned(child).layoutIsResolved(mod), - .error_union_type => |k| Type.fromInterned(k.payload_type).layoutIsResolved(mod), - else => true, - }; - } - - pub fn isSinglePointer(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_info| ptr_info.flags.size == .One, - else => false, - }; - } - - /// Asserts `ty` is a pointer. - pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { - return ptrSizeOrNull(ty, mod).?; - } - - /// Returns `null` if `ty` is not a pointer. 
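/// e.g. `.Slice` for `[]u8`, `.C` for `[*c]u8`, and `null` for `u8`.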
-    pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .ptr_type => |ptr_info| ptr_info.flags.size,
-            else => null,
-        };
-    }
-
-    pub fn isSlice(ty: Type, mod: *const Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .ptr_type => |ptr_type| ptr_type.flags.size == .Slice,
-            else => false,
-        };
-    }
-
-    pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type {
-        return Type.fromInterned(mod.intern_pool.slicePtrType(ty.toIntern()));
-    }
-
-    pub fn isConstPtr(ty: Type, mod: *const Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .ptr_type => |ptr_type| ptr_type.flags.is_const,
-            else => false,
-        };
-    }
-
-    pub fn isVolatilePtr(ty: Type, mod: *const Module) bool {
-        return isVolatilePtrIp(ty, &mod.intern_pool);
-    }
-
-    pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool {
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .ptr_type => |ptr_type| ptr_type.flags.is_volatile,
-            else => false,
-        };
-    }
-
-    pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .ptr_type => |ptr_type| ptr_type.flags.is_allowzero,
-            .opt_type => true,
-            else => false,
-        };
-    }
-
-    pub fn isCPtr(ty: Type, mod: *const Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .ptr_type => |ptr_type| ptr_type.flags.size == .C,
-            else => false,
-        };
-    }
-
-    pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
-                .Slice => false,
-                .One, .Many, .C => true,
-            },
-            .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
-                .ptr_type => |p| switch (p.flags.size) {
-                    .Slice, .C => false,
-                    .Many, .One => !p.flags.is_allowzero,
-                },
-                else => false,
-            },
-            else => false,
-        };
-    }
-
-    /// For pointer-like optionals, returns true, otherwise returns the allowzero property
-    /// of pointers.
-    pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool {
-        if (ty.isPtrLikeOptional(mod)) {
-            return true;
-        }
-        return ty.ptrInfo(mod).flags.is_allowzero;
-    }
-
-    /// See also `isPtrLikeOptional`.
-    pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .opt_type => |child_type| child_type == .anyerror_type or switch (mod.intern_pool.indexToKey(child_type)) {
-                .ptr_type => |ptr_type| ptr_type.flags.size != .C and !ptr_type.flags.is_allowzero,
-                .error_set_type, .inferred_error_set_type => true,
-                else => false,
-            },
-            .ptr_type => |ptr_type| ptr_type.flags.size == .C,
-            else => false,
-        };
-    }
-
-    /// Returns true if the type is optional and would be lowered to a single pointer
-    /// address value, using 0 for null. Note that this returns true for C pointers.
-    /// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`.
-    pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .ptr_type => |ptr_type| ptr_type.flags.size == .C,
-            .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
-                .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
-                    .Slice, .C => false,
-                    .Many, .One => !ptr_type.flags.is_allowzero,
-                },
-                else => false,
-            },
-            else => false,
-        };
-    }
-
-    /// For *[N]T, returns [N]T.
-    /// For *T, returns T.
-    /// For [*]T, returns T.
-    pub fn childType(ty: Type, mod: *const Module) Type {
-        return childTypeIp(ty, &mod.intern_pool);
-    }
-
-    pub fn childTypeIp(ty: Type, ip: *const InternPool) Type {
-        return Type.fromInterned(ip.childType(ty.toIntern()));
-    }
-
-    /// For *[N]T, returns T.
-    /// For ?*T, returns T.
-    /// For ?*[N]T, returns T.
-    /// For ?[*]T, returns T.
-    /// For *T, returns T.
-    /// For [*]T, returns T.
-    /// For [N]T, returns T.
-    /// For []T, returns T.
-    /// For anyframe->T, returns T.
-    pub fn elemType2(ty: Type, mod: *const Module) Type {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
-                .One => Type.fromInterned(ptr_type.child).shallowElemType(mod),
-                .Many, .C, .Slice => Type.fromInterned(ptr_type.child),
-            },
-            .anyframe_type => |child| {
-                assert(child != .none);
-                return Type.fromInterned(child);
-            },
-            .vector_type => |vector_type| Type.fromInterned(vector_type.child),
-            .array_type => |array_type| Type.fromInterned(array_type.child),
-            .opt_type => |child| Type.fromInterned(mod.intern_pool.childType(child)),
-            else => unreachable,
-        };
-    }
-
-    fn shallowElemType(child_ty: Type, mod: *const Module) Type {
-        return switch (child_ty.zigTypeTag(mod)) {
-            .Array, .Vector => child_ty.childType(mod),
-            else => child_ty,
-        };
-    }
-
-    /// For vectors, returns the element type. Otherwise returns self.
-    pub fn scalarType(ty: Type, mod: *Module) Type {
-        return switch (ty.zigTypeTag(mod)) {
-            .Vector => ty.childType(mod),
-            else => ty,
-        };
-    }
-
-    /// Asserts that the type is an optional.
-    /// Note that for C pointers this returns the type unmodified.
-    pub fn optionalChild(ty: Type, mod: *const Module) Type {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .opt_type => |child| Type.fromInterned(child),
-            .ptr_type => |ptr_type| b: {
-                assert(ptr_type.flags.size == .C);
-                break :b ty;
-            },
-            else => unreachable,
-        };
-    }
-
-    /// Returns the tag type of a union, if the type is a union and it has a tag type.
-    /// Otherwise, returns `null`.
-    pub fn unionTagType(ty: Type, mod: *Module) ?Type {
-        const ip = &mod.intern_pool;
-        switch (ip.indexToKey(ty.toIntern())) {
-            .union_type => {},
-            else => return null,
-        }
-        const union_type = ip.loadUnionType(ty.toIntern());
-        switch (union_type.flagsPtr(ip).runtime_tag) {
-            .tagged => {
-                assert(union_type.flagsPtr(ip).status.haveFieldTypes());
-                return Type.fromInterned(union_type.enum_tag_ty);
-            },
-            else => return null,
-        }
-    }
-
-    /// Same as `unionTagType` but includes safety tag.
-    /// Codegen should use this version.
-    pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .union_type => {
-                const union_type = ip.loadUnionType(ty.toIntern());
-                if (!union_type.hasTag(ip)) return null;
-                assert(union_type.haveFieldTypes(ip));
-                return Type.fromInterned(union_type.enum_tag_ty);
-            },
-            else => null,
-        };
-    }
-
-    /// Asserts the type is a union; returns the tag type, even if the tag will
-    /// not be stored at runtime.
-    pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type {
-        const union_obj = mod.typeToUnion(ty).?;
-        return Type.fromInterned(union_obj.enum_tag_ty);
-    }
-
-    pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) ?Type {
-        const ip = &mod.intern_pool;
-        const union_obj = mod.typeToUnion(ty).?;
-        const union_fields = union_obj.field_types.get(ip);
-        const index = mod.unionTagFieldIndex(union_obj, enum_tag) orelse return null;
-        return Type.fromInterned(union_fields[index]);
-    }
-
-    pub fn unionFieldTypeByIndex(ty: Type, index: usize, mod: *Module) Type {
-        const ip = &mod.intern_pool;
-        const union_obj = mod.typeToUnion(ty).?;
-        return Type.fromInterned(union_obj.field_types.get(ip)[index]);
-    }
-
-    pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
-        const union_obj = mod.typeToUnion(ty).?;
-        return mod.unionTagFieldIndex(union_obj, enum_tag);
-    }
-
-    pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool {
-        const ip = &mod.intern_pool;
-        const union_obj = mod.typeToUnion(ty).?;
-        for (union_obj.field_types.get(ip)) |field_ty| {
-            if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false;
-        }
-        return true;
-    }
-
-    /// Returns the type used for backing storage of this union during comptime operations.
-    /// Asserts the type is either an extern or packed union.
-    pub fn unionBackingType(ty: Type, mod: *Module) !Type {
-        return switch (ty.containerLayout(mod)) {
-            .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }),
-            .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))),
-            .auto => unreachable,
-        };
-    }
-
-    pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout {
-        const ip = &mod.intern_pool;
-        const union_obj = ip.loadUnionType(ty.toIntern());
-        return mod.getUnionLayout(union_obj);
-    }
-
-    pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => ip.loadStructType(ty.toIntern()).layout,
-            .anon_struct_type => .auto,
-            .union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout,
-            else => unreachable,
-        };
-    }
-
-    /// Asserts that the type is an error union.
-    pub fn errorUnionPayload(ty: Type, mod: *Module) Type {
-        return Type.fromInterned(mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type);
-    }
-
-    /// Asserts that the type is an error union.
-    pub fn errorUnionSet(ty: Type, mod: *Module) Type {
-        return Type.fromInterned(mod.intern_pool.errorUnionSet(ty.toIntern()));
-    }
-
-    /// Returns false for unresolved inferred error sets.
-    pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool {
-        const ip = &mod.intern_pool;
-        return switch (ty.toIntern()) {
-            .anyerror_type, .adhoc_inferred_error_set_type => false,
-            else => switch (ip.indexToKey(ty.toIntern())) {
-                .error_set_type => |error_set_type| error_set_type.names.len == 0,
-                .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
-                    .none, .anyerror_type => false,
-                    else => |t| ip.indexToKey(t).error_set_type.names.len == 0,
-                },
-                else => unreachable,
-            },
-        };
-    }
-
-    /// Returns true if it is an error set that includes anyerror, false otherwise.
-    /// Note that the result may be a false negative if the type did not get error set
-    /// resolution prior to this call.
-    pub fn isAnyError(ty: Type, mod: *Module) bool {
-        const ip = &mod.intern_pool;
-        return switch (ty.toIntern()) {
-            .anyerror_type => true,
-            .adhoc_inferred_error_set_type => false,
-            else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-                .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type,
-                else => false,
-            },
-        };
-    }
-
-    pub fn isError(ty: Type, mod: *const Module) bool {
-        return switch (ty.zigTypeTag(mod)) {
-            .ErrorUnion, .ErrorSet => true,
-            else => false,
-        };
-    }
-
-    /// Returns whether ty, which must be an error set, includes an error `name`.
-    /// Might return a false negative if `ty` is an inferred error set and not fully
-    /// resolved yet.
-    pub fn errorSetHasFieldIp(
-        ip: *const InternPool,
-        ty: InternPool.Index,
-        name: InternPool.NullTerminatedString,
-    ) bool {
-        return switch (ty) {
-            .anyerror_type => true,
-            else => switch (ip.indexToKey(ty)) {
-                .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null,
-                .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
-                    .anyerror_type => true,
-                    .none => false,
-                    else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null,
-                },
-                else => unreachable,
-            },
-        };
-    }
-
-    /// Returns whether ty, which must be an error set, includes an error `name`.
-    /// Might return a false negative if `ty` is an inferred error set and not fully
-    /// resolved yet.
-    pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool {
-        const ip = &mod.intern_pool;
-        return switch (ty.toIntern()) {
-            .anyerror_type => true,
-            else => switch (ip.indexToKey(ty.toIntern())) {
-                .error_set_type => |error_set_type| {
-                    // If the string is not interned, then the field certainly is not present.
-                    const field_name_interned = ip.getString(name).unwrap() orelse return false;
-                    return error_set_type.nameIndex(ip, field_name_interned) != null;
-                },
-                .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
-                    .anyerror_type => true,
-                    .none => false,
-                    else => |t| {
-                        // If the string is not interned, then the field certainly is not present.
-                        const field_name_interned = ip.getString(name).unwrap() orelse return false;
-                        return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null;
-                    },
-                },
-                else => unreachable,
-            },
-        };
-    }
-
-    /// Asserts the type is an array or vector or struct.
-    pub fn arrayLen(ty: Type, mod: *const Module) u64 {
-        return ty.arrayLenIp(&mod.intern_pool);
-    }
-
-    pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 {
-        return ip.aggregateTypeLen(ty.toIntern());
-    }
-
-    pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 {
-        return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern());
-    }
-
-    pub fn vectorLen(ty: Type, mod: *const Module) u32 {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .vector_type => |vector_type| vector_type.len,
-            .anon_struct_type => |tuple| @intCast(tuple.types.len),
-            else => unreachable,
-        };
-    }
-
-    /// Asserts the type is an array, pointer or vector.
-    pub fn sentinel(ty: Type, mod: *const Module) ?Value {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .vector_type,
-            .struct_type,
-            .anon_struct_type,
-            => null,
-
-            .array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
-            .ptr_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
-
-            else => unreachable,
-        };
-    }
-
-    /// Returns true if and only if the type is a fixed-width integer.
-    pub fn isInt(self: Type, mod: *const Module) bool {
-        return self.toIntern() != .comptime_int_type and
-            mod.intern_pool.isIntegerType(self.toIntern());
-    }
-
-    /// Returns true if and only if the type is a fixed-width, signed integer.
-    pub fn isSignedInt(ty: Type, mod: *const Module) bool {
-        return switch (ty.toIntern()) {
-            .c_char_type => mod.getTarget().charSignedness() == .signed,
-            .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true,
-            else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-                .int_type => |int_type| int_type.signedness == .signed,
-                else => false,
-            },
-        };
-    }
-
-    /// Returns true if and only if the type is a fixed-width, unsigned integer.
-    pub fn isUnsignedInt(ty: Type, mod: *const Module) bool {
-        return switch (ty.toIntern()) {
-            .c_char_type => mod.getTarget().charSignedness() == .unsigned,
-            .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true,
-            else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-                .int_type => |int_type| int_type.signedness == .unsigned,
-                else => false,
-            },
-        };
-    }
-
-    /// Returns true for integers, enums, error sets, and packed structs.
-    /// If this function returns true, then intInfo() can be called on the type.
-    pub fn isAbiInt(ty: Type, mod: *Module) bool {
-        return switch (ty.zigTypeTag(mod)) {
-            .Int, .Enum, .ErrorSet => true,
-            .Struct => ty.containerLayout(mod) == .@"packed",
-            else => false,
-        };
-    }
-
-    /// Asserts the type is an integer, enum, error set, or vector of one of them.
-    pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType {
-        const ip = &mod.intern_pool;
-        const target = mod.getTarget();
-        var ty = starting_ty;
-
-        while (true) switch (ty.toIntern()) {
-            .anyerror_type, .adhoc_inferred_error_set_type => {
-                return .{ .signedness = .unsigned, .bits = mod.errorSetBits() };
-            },
-            .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() },
-            .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() },
-            .c_char_type => return .{ .signedness = mod.getTarget().charSignedness(), .bits = target.c_type_bit_size(.char) },
-            .c_short_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) },
-            .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) },
-            .c_int_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) },
-            .c_uint_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) },
-            .c_long_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) },
-            .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) },
-            .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) },
-            .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
-            else => switch (ip.indexToKey(ty.toIntern())) {
-                .int_type => |int_type| return int_type,
-                .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*),
-                .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
-                .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child),
-
-                .error_set_type, .inferred_error_set_type => {
-                    return .{ .signedness = .unsigned, .bits = mod.errorSetBits() };
-                },
-
-                .anon_struct_type => unreachable,
-
-                .ptr_type => unreachable,
-                .anyframe_type => unreachable,
-                .array_type => unreachable,
-
-                .opt_type => unreachable,
-                .error_union_type => unreachable,
-                .func_type => unreachable,
-                .simple_type => unreachable, // handled via Index enum tag above
-
-                .union_type => unreachable,
-                .opaque_type => unreachable,
-
-                // values, not types
-                .undef,
-                .simple_value,
-                .variable,
-                .extern_func,
-                .func,
-                .int,
-                .err,
-                .error_union,
-                .enum_literal,
-                .enum_tag,
-                .empty_enum_value,
-                .float,
-                .ptr,
-                .slice,
-                .opt,
-                .aggregate,
-                .un,
-                // memoization, not types
-                .memoized_call,
-                => unreachable,
-            },
-        };
-    }
-
-    pub fn isNamedInt(ty: Type) bool {
-        return switch (ty.toIntern()) {
-            .usize_type,
-            .isize_type,
-            .c_char_type,
-            .c_short_type,
-            .c_ushort_type,
-            .c_int_type,
-            .c_uint_type,
-            .c_long_type,
-            .c_ulong_type,
-            .c_longlong_type,
-            .c_ulonglong_type,
-            => true,
-
-            else => false,
-        };
-    }
-
-    /// Returns `false` for `comptime_float`.
-    pub fn isRuntimeFloat(ty: Type) bool {
-        return switch (ty.toIntern()) {
-            .f16_type,
-            .f32_type,
-            .f64_type,
-            .f80_type,
-            .f128_type,
-            .c_longdouble_type,
-            => true,
-
-            else => false,
-        };
-    }
-
-    /// Returns `true` for `comptime_float`.
-    pub fn isAnyFloat(ty: Type) bool {
-        return switch (ty.toIntern()) {
-            .f16_type,
-            .f32_type,
-            .f64_type,
-            .f80_type,
-            .f128_type,
-            .c_longdouble_type,
-            .comptime_float_type,
-            => true,
-
-            else => false,
-        };
-    }
-
-    /// Asserts the type is a fixed-size float or comptime_float.
-    /// Returns 128 for comptime_float types.
-    pub fn floatBits(ty: Type, target: Target) u16 {
-        return switch (ty.toIntern()) {
-            .f16_type => 16,
-            .f32_type => 32,
-            .f64_type => 64,
-            .f80_type => 80,
-            .f128_type, .comptime_float_type => 128,
-            .c_longdouble_type => target.c_type_bit_size(.longdouble),
-
-            else => unreachable,
-        };
-    }
-
-    /// Asserts the type is a function or a function pointer.
-    pub fn fnReturnType(ty: Type, mod: *Module) Type {
-        return Type.fromInterned(mod.intern_pool.funcTypeReturnType(ty.toIntern()));
-    }
-
-    /// Asserts the type is a function.
-    pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention {
-        return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc;
-    }
-
-    pub fn isValidParamType(self: Type, mod: *const Module) bool {
-        return switch (self.zigTypeTagOrPoison(mod) catch return true) {
-            .Opaque, .NoReturn => false,
-            else => true,
-        };
-    }
-
-    pub fn isValidReturnType(self: Type, mod: *const Module) bool {
-        return switch (self.zigTypeTagOrPoison(mod) catch return true) {
-            .Opaque => false,
-            else => true,
-        };
-    }
-
-    /// Asserts the type is a function.
-    pub fn fnIsVarArgs(ty: Type, mod: *Module) bool {
-        return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args;
-    }
-
-    pub fn isNumeric(ty: Type, mod: *const Module) bool {
-        return switch (ty.toIntern()) {
-            .f16_type,
-            .f32_type,
-            .f64_type,
-            .f80_type,
-            .f128_type,
-            .c_longdouble_type,
-            .comptime_int_type,
-            .comptime_float_type,
-            .usize_type,
-            .isize_type,
-            .c_char_type,
-            .c_short_type,
-            .c_ushort_type,
-            .c_int_type,
-            .c_uint_type,
-            .c_long_type,
-            .c_ulong_type,
-            .c_longlong_type,
-            .c_ulonglong_type,
-            => true,
-
-            else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-                .int_type => true,
-                else => false,
-            },
-        };
-    }
-
-    /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which
-    /// resolves field types rather than asserting they are already resolved.
-    pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
-        var ty = starting_type;
-        const ip = &mod.intern_pool;
-        while (true) switch (ty.toIntern()) {
-            .empty_struct_type => return Value.empty_struct,
-
-            else => switch (ip.indexToKey(ty.toIntern())) {
-                .int_type => |int_type| {
-                    if (int_type.bits == 0) {
-                        return try mod.intValue(ty, 0);
-                    } else {
-                        return null;
-                    }
-                },
-
-                .ptr_type,
-                .error_union_type,
-                .func_type,
-                .anyframe_type,
-                .error_set_type,
-                .inferred_error_set_type,
-                => return null,
-
-                inline .array_type, .vector_type => |seq_type, seq_tag| {
-                    const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
-                    if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                        .ty = ty.toIntern(),
-                        .storage = .{ .elems = &.{} },
-                    } })));
-                    if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| {
-                        return Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                            .ty = ty.toIntern(),
-                            .storage = .{ .repeated_elem = opv.toIntern() },
-                        } })));
-                    }
-                    return null;
-                },
-                .opt_type => |child| {
-                    if (child == .noreturn_type) {
-                        return try mod.nullValue(ty);
-                    } else {
-                        return null;
-                    }
-                },
-
-                .simple_type => |t| switch (t) {
-                    .f16,
-                    .f32,
-                    .f64,
-                    .f80,
-                    .f128,
-                    .usize,
-                    .isize,
-                    .c_char,
-                    .c_short,
-                    .c_ushort,
-                    .c_int,
-                    .c_uint,
-                    .c_long,
-                    .c_ulong,
-                    .c_longlong,
-                    .c_ulonglong,
-                    .c_longdouble,
-                    .anyopaque,
-                    .bool,
-                    .type,
-                    .anyerror,
-                    .comptime_int,
-                    .comptime_float,
-                    .enum_literal,
-                    .atomic_order,
-                    .atomic_rmw_op,
-                    .calling_convention,
-                    .address_space,
-                    .float_mode,
-                    .reduce_op,
-                    .call_modifier,
-                    .prefetch_options,
-                    .export_options,
-                    .extern_options,
-                    .type_info,
-                    .adhoc_inferred_error_set,
-                    => return null,
-
-                    .void => return Value.void,
-                    .noreturn => return Value.@"unreachable",
-                    .null => return Value.null,
-                    .undefined => return Value.undef,
-
-                    .generic_poison => unreachable,
-                },
-                .struct_type => {
-                    const struct_type = ip.loadStructType(ty.toIntern());
-                    assert(struct_type.haveFieldTypes(ip));
-                    if (struct_type.knownNonOpv(ip))
-                        return null;
-                    const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len);
-                    defer mod.gpa.free(field_vals);
-                    for (field_vals, 0..) |*field_val, i_usize| {
-                        const i: u32 = @intCast(i_usize);
-                        if (struct_type.fieldIsComptime(ip, i)) {
-                            assert(struct_type.haveFieldInits(ip));
-                            field_val.* = struct_type.field_inits.get(ip)[i];
-                            continue;
-                        }
-                        const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
-                        if (try field_ty.onePossibleValue(mod)) |field_opv| {
-                            field_val.* = field_opv.toIntern();
-                        } else return null;
-                    }
-
-                    // In this case the struct has no runtime-known fields and
-                    // therefore has one possible value.
-                    return Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                        .ty = ty.toIntern(),
-                        .storage = .{ .elems = field_vals },
-                    } })));
-                },
-
-                .anon_struct_type => |tuple| {
-                    for (tuple.values.get(ip)) |val| {
-                        if (val == .none) return null;
-                    }
-                    // In this case the struct has all comptime-known fields and
-                    // therefore has one possible value.
-                    // TODO: write something like getCoercedInts to avoid needing to dupe
-                    const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip));
-                    defer mod.gpa.free(duped_values);
-                    return Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                        .ty = ty.toIntern(),
-                        .storage = .{ .elems = duped_values },
-                    } })));
-                },
-
-                .union_type => {
-                    const union_obj = ip.loadUnionType(ty.toIntern());
-                    const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse
-                        return null;
-                    if (union_obj.field_types.len == 0) {
-                        const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
-                        return Value.fromInterned(only);
-                    }
-                    const only_field_ty = union_obj.field_types.get(ip)[0];
-                    const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse
-                        return null;
-                    const only = try mod.intern(.{ .un = .{
-                        .ty = ty.toIntern(),
-                        .tag = tag_val.toIntern(),
-                        .val = val_val.toIntern(),
-                    } });
-                    return Value.fromInterned(only);
-                },
-                .opaque_type => return null,
-                .enum_type => {
-                    const enum_type = ip.loadEnumType(ty.toIntern());
-                    switch (enum_type.tag_mode) {
-                        .nonexhaustive => {
-                            if (enum_type.tag_ty == .comptime_int_type) return null;
-
-                            if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
-                                const only = try mod.intern(.{ .enum_tag = .{
-                                    .ty = ty.toIntern(),
-                                    .int = int_opv.toIntern(),
-                                } });
-                                return Value.fromInterned(only);
-                            }
-
-                            return null;
-                        },
-                        .auto, .explicit => {
-                            if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
-
-                            switch (enum_type.names.len) {
-                                0 => {
-                                    const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
-                                    return Value.fromInterned(only);
-                                },
-                                1 => {
-                                    if (enum_type.values.len == 0) {
-                                        const only = try mod.intern(.{ .enum_tag = .{
-                                            .ty = ty.toIntern(),
-                                            .int = try mod.intern(.{ .int = .{
-                                                .ty = enum_type.tag_ty,
-                                                .storage = .{ .u64 = 0 },
-                                            } }),
-                                        } });
-                                        return Value.fromInterned(only);
-                                    } else {
-                                        return Value.fromInterned(enum_type.values.get(ip)[0]);
-                                    }
-                                },
-                                else => return null,
-                            }
-                        },
-                    }
-                },
-
-                // values, not types
-                .undef,
-                .simple_value,
-                .variable,
-                .extern_func,
-                .func,
-                .int,
-                .err,
-                .error_union,
-                .enum_literal,
-                .enum_tag,
-                .empty_enum_value,
-                .float,
-                .ptr,
-                .slice,
-                .opt,
-                .aggregate,
-                .un,
-                // memoization, not types
-                .memoized_call,
-                => unreachable,
-            },
-        };
-    }
-
-    /// During semantic analysis, instead call `Sema.typeRequiresComptime` which
-    /// resolves field types rather than asserting they are already resolved.
-    pub fn comptimeOnly(ty: Type, mod: *Module) bool {
-        return ty.comptimeOnlyAdvanced(mod, null) catch unreachable;
-    }
-
-    /// `generic_poison` will return false.
-    /// May return false negatives when structs and unions are having their field types resolved.
-    /// If `opt_sema` is not provided, asserts that the type is sufficiently resolved.
-    pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool {
-        const ip = &mod.intern_pool;
-        return switch (ty.toIntern()) {
-            .empty_struct_type => false,
-
-            else => switch (ip.indexToKey(ty.toIntern())) {
-                .int_type => false,
-                .ptr_type => |ptr_type| {
-                    const child_ty = Type.fromInterned(ptr_type.child);
-                    switch (child_ty.zigTypeTag(mod)) {
-                        .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema),
-                        .Opaque => return false,
-                        else => return child_ty.comptimeOnlyAdvanced(mod, opt_sema),
-                    }
-                },
-                .anyframe_type => |child| {
-                    if (child == .none) return false;
-                    return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema);
-                },
-                .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema),
-                .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema),
-                .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema),
-                .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema),
-
-                .error_set_type,
-                .inferred_error_set_type,
-                => false,
-
-                // These are function bodies, not function pointers.
-                .func_type => true,
-
-                .simple_type => |t| switch (t) {
-                    .f16,
-                    .f32,
-                    .f64,
-                    .f80,
-                    .f128,
-                    .usize,
-                    .isize,
-                    .c_char,
-                    .c_short,
-                    .c_ushort,
-                    .c_int,
-                    .c_uint,
-                    .c_long,
-                    .c_ulong,
-                    .c_longlong,
-                    .c_ulonglong,
-                    .c_longdouble,
-                    .anyopaque,
-                    .bool,
-                    .void,
-                    .anyerror,
-                    .adhoc_inferred_error_set,
-                    .noreturn,
-                    .generic_poison,
-                    .atomic_order,
-                    .atomic_rmw_op,
-                    .calling_convention,
-                    .address_space,
-                    .float_mode,
-                    .reduce_op,
-                    .call_modifier,
-                    .prefetch_options,
-                    .export_options,
-                    .extern_options,
-                    => false,
-
-                    .type,
-                    .comptime_int,
-                    .comptime_float,
-                    .null,
-                    .undefined,
-                    .enum_literal,
-                    .type_info,
-                    => true,
-                },
-                .struct_type => {
-                    const struct_type = ip.loadStructType(ty.toIntern());
-                    // packed structs cannot be comptime-only because they have a well-defined
-                    // memory layout and every field has a well-defined bit pattern.
-                    if (struct_type.layout == .@"packed")
-                        return false;
-
-                    // A struct with no fields is not comptime-only.
-                    return switch (struct_type.flagsPtr(ip).requires_comptime) {
-                        .no, .wip => false,
-                        .yes => true,
-                        .unknown => {
-                            // The type is not resolved; assert that we have a Sema.
-                            const sema = opt_sema.?;
-
-                            if (struct_type.flagsPtr(ip).field_types_wip)
-                                return false;
-
-                            struct_type.flagsPtr(ip).requires_comptime = .wip;
-                            errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown;
-
-                            try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
-
-                            for (0..struct_type.field_types.len) |i_usize| {
-                                const i: u32 = @intCast(i_usize);
-                                if (struct_type.fieldIsComptime(ip, i)) continue;
-                                const field_ty = struct_type.field_types.get(ip)[i];
-                                if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
-                                    // Note that this does not cause the layout to
-                                    // be considered resolved. Comptime-only types
-                                    // still maintain a layout of their
-                                    // runtime-known fields.
-                                    struct_type.flagsPtr(ip).requires_comptime = .yes;
-                                    return true;
-                                }
-                            }
-
-                            struct_type.flagsPtr(ip).requires_comptime = .no;
-                            return false;
-                        },
-                    };
-                },
-
-                .anon_struct_type => |tuple| {
-                    for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
-                        const have_comptime_val = val != .none;
-                        if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true;
-                    }
-                    return false;
-                },
-
-                .union_type => {
-                    const union_type = ip.loadUnionType(ty.toIntern());
-                    switch (union_type.flagsPtr(ip).requires_comptime) {
-                        .no, .wip => return false,
-                        .yes => return true,
-                        .unknown => {
-                            // The type is not resolved; assert that we have a Sema.
-                            const sema = opt_sema.?;
-
-                            if (union_type.flagsPtr(ip).status == .field_types_wip)
-                                return false;
-
-                            union_type.flagsPtr(ip).requires_comptime = .wip;
-                            errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
-
-                            try sema.resolveTypeFieldsUnion(ty, union_type);
-
-                            for (0..union_type.field_types.len) |field_idx| {
-                                const field_ty = union_type.field_types.get(ip)[field_idx];
-                                if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
-                                    union_type.flagsPtr(ip).requires_comptime = .yes;
-                                    return true;
-                                }
-                            }
-
-                            union_type.flagsPtr(ip).requires_comptime = .no;
-                            return false;
-                        },
-                    }
-                },
-
-                .opaque_type => false,
-
-                .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema),
-
-                // values, not types
-                .undef,
-                .simple_value,
-                .variable,
-                .extern_func,
-                .func,
-                .int,
-                .err,
-                .error_union,
-                .enum_literal,
-                .enum_tag,
-                .empty_enum_value,
-                .float,
-                .ptr,
-                .slice,
-                .opt,
-                .aggregate,
-                .un,
-                // memoization, not types
-                .memoized_call,
-                => unreachable,
-            },
-        };
-    }
-
-    pub fn isVector(ty: Type, mod: *const Module) bool {
-        return ty.zigTypeTag(mod) == .Vector;
-    }
-
-    /// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len.
-    pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 {
-        if (!ty.isVector(zcu)) return 0;
-        const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type;
-        return v.len * Type.fromInterned(v.child).bitSize(zcu);
-    }
-
-    pub fn isArrayOrVector(ty: Type, mod: *const Module) bool {
-        return switch (ty.zigTypeTag(mod)) {
-            .Array, .Vector => true,
-            else => false,
-        };
-    }
-
-    pub fn isIndexable(ty: Type, mod: *Module) bool {
-        return switch (ty.zigTypeTag(mod)) {
-            .Array, .Vector => true,
-            .Pointer => switch (ty.ptrSize(mod)) {
-                .Slice, .Many, .C => true,
-                .One => switch (ty.childType(mod).zigTypeTag(mod)) {
-                    .Array, .Vector => true,
-                    .Struct => ty.childType(mod).isTuple(mod),
-                    else => false,
-                },
-            },
-            .Struct => ty.isTuple(mod),
-            else => false,
-        };
-    }
-
-    pub fn indexableHasLen(ty: Type, mod: *Module) bool {
-        return switch (ty.zigTypeTag(mod)) {
-            .Array, .Vector => true,
-            .Pointer => switch (ty.ptrSize(mod)) {
-                .Many, .C => false,
-                .Slice => true,
-                .One => switch (ty.childType(mod).zigTypeTag(mod)) {
-                    .Array, .Vector => true,
-                    .Struct => ty.childType(mod).isTuple(mod),
-                    else => false,
-                },
-            },
-            .Struct => ty.isTuple(mod),
-            else => false,
-        };
-    }
-
-    /// Asserts that the type can have a namespace.
-    pub fn getNamespaceIndex(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex {
-        return ty.getNamespace(zcu).?;
-    }
-
-    /// Returns null if the type has no namespace.
-    pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex {
-        const ip = &zcu.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace,
-            .struct_type => ip.loadStructType(ty.toIntern()).namespace,
-            .union_type => ip.loadUnionType(ty.toIntern()).namespace,
-            .enum_type => ip.loadEnumType(ty.toIntern()).namespace,
-
-            .anon_struct_type => .none,
-            .simple_type => |s| switch (s) {
-                .anyopaque,
-                .atomic_order,
-                .atomic_rmw_op,
-                .calling_convention,
-                .address_space,
-                .float_mode,
-                .reduce_op,
-                .call_modifier,
-                .prefetch_options,
-                .export_options,
-                .extern_options,
-                .type_info,
-                => .none,
-                else => null,
-            },
-
-            else => null,
-        };
-    }
-
-    // Works for vectors and vectors of integers.
-    pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
-        const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
-        return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
-            .ty = dest_ty.toIntern(),
-            .storage = .{ .repeated_elem = scalar.toIntern() },
-        } }))) else scalar;
-    }
-
-    /// Asserts that the type is an integer.
-    pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
-        const info = ty.intInfo(mod);
-        if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0);
-        if (info.bits == 0) return mod.intValue(dest_ty, -1);
-
-        if (std.math.cast(u6, info.bits - 1)) |shift| {
-            const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
-            return mod.intValue(dest_ty, n);
-        }
-
-        var res = try std.math.big.int.Managed.init(mod.gpa);
-        defer res.deinit();
-
-        try res.setTwosCompIntLimit(.min, info.signedness, info.bits);
-
-        return mod.intValue_big(dest_ty, res.toConst());
-    }
-
-    // Works for vectors and vectors of integers.
-    /// The returned Value will have type dest_ty.
-    pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
-        const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
-        return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
-            .ty = dest_ty.toIntern(),
-            .storage = .{ .repeated_elem = scalar.toIntern() },
-        } }))) else scalar;
-    }
-
-    /// The returned Value will have type dest_ty.
-    pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
-        const info = ty.intInfo(mod);
-
-        switch (info.bits) {
-            0 => return switch (info.signedness) {
-                .signed => try mod.intValue(dest_ty, -1),
-                .unsigned => try mod.intValue(dest_ty, 0),
-            },
-            1 => return switch (info.signedness) {
-                .signed => try mod.intValue(dest_ty, 0),
-                .unsigned => try mod.intValue(dest_ty, 1),
-            },
-            else => {},
-        }
-
-        if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
-            .signed => {
-                const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
-                return mod.intValue(dest_ty, n);
-            },
-            .unsigned => {
-                const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
-                return mod.intValue(dest_ty, n);
-            },
-        };
-
-        var res = try std.math.big.int.Managed.init(mod.gpa);
-        defer res.deinit();
-
-        try res.setTwosCompIntLimit(.max, info.signedness, info.bits);
-
-        return mod.intValue_big(dest_ty, res.toConst());
-    }
-
-    /// Asserts the type is an enum or a union.
-    pub fn intTagType(ty: Type, mod: *Module) Type {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod),
-            .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
-            else => unreachable,
-        };
-    }
-
-    pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
-                .nonexhaustive => true,
-                .auto, .explicit => false,
-            },
-            else => false,
-        };
-    }
-
-    // Asserts that `ty` is an error set and not `anyerror`.
-    // Asserts that `ty` is resolved if it is an inferred error set.
-    pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .error_set_type => |x| x.names,
-            .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
-                .none => unreachable, // unresolved inferred error set
-                .anyerror_type => unreachable,
-                else => |t| ip.indexToKey(t).error_set_type.names,
-            },
-            else => unreachable,
-        };
-    }
-
-    pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
-        return mod.intern_pool.loadEnumType(ty.toIntern()).names;
-    }
-
-    pub fn enumFieldCount(ty: Type, mod: *Module) usize {
-        return mod.intern_pool.loadEnumType(ty.toIntern()).names.len;
-    }
-
-    pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
-        const ip = &mod.intern_pool;
-        return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index];
-    }
-
-    pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 {
-        const ip = &mod.intern_pool;
-        const enum_type = ip.loadEnumType(ty.toIntern());
-        return enum_type.nameIndex(ip, field_name);
-    }
-
-    /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or
-    /// an integer which represents the enum value. Returns the field index in
-    /// declaration order, or `null` if `enum_tag` does not match any field.
-    pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
-        const ip = &mod.intern_pool;
-        const enum_type = ip.loadEnumType(ty.toIntern());
-        const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
-            .int => enum_tag.toIntern(),
-            .enum_tag => |info| info.int,
-            else => unreachable,
-        };
-        assert(ip.typeOf(int_tag) == enum_type.tag_ty);
-        return enum_type.tagValueIndex(ip, int_tag);
-    }
-
-    /// Returns none in the case of a tuple which uses the integer index as the field name.
-    pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
-            .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index),
-            else => unreachable,
-        };
-    }
-
-    pub fn structFieldCount(ty: Type, mod: *Module) u32 {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
-            .anon_struct_type => |anon_struct| anon_struct.types.len,
-            else => unreachable,
-        };
-    }
-
-    /// Supports structs and unions.
-    pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]),
-            .union_type => {
-                const union_obj = ip.loadUnionType(ty.toIntern());
-                return Type.fromInterned(union_obj.field_types.get(ip)[index]);
-            },
-            .anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]),
-            else => unreachable,
-        };
-    }
-
-    pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment {
-        return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable;
-    }
-
-    pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment {
-        const ip = &zcu.intern_pool;
-        switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => {
-                const struct_type = ip.loadStructType(ty.toIntern());
-                assert(struct_type.layout != .@"packed");
-                const explicit_align = struct_type.fieldAlign(ip, index);
-                const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
-                if (opt_sema) |sema| {
-                    return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
-                } else {
-                    return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
-                }
-            },
-            .anon_struct_type => |anon_struct| {
-                return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar;
-            },
-            .union_type => {
-                const union_obj = ip.loadUnionType(ty.toIntern());
-                if (opt_sema) |sema| {
-                    return sema.unionFieldAlignment(union_obj, @intCast(index));
-                } else {
-                    return zcu.unionFieldNormalAlignment(union_obj, @intCast(index));
-                }
-            },
-            else => unreachable,
-        }
-    }
-
-    pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
-        const ip = &mod.intern_pool;
-        switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => {
-                const struct_type = ip.loadStructType(ty.toIntern());
-                const val = struct_type.fieldInit(ip, index);
-                // TODO: avoid using `unreachable` to indicate this.
-                if (val == .none) return Value.@"unreachable";
-                return Value.fromInterned(val);
-            },
-            .anon_struct_type => |anon_struct| {
-                const val = anon_struct.values.get(ip)[index];
-                // TODO: avoid using `unreachable` to indicate this.
-                if (val == .none) return Value.@"unreachable";
-                return Value.fromInterned(val);
-            },
-            else => unreachable,
-        }
-    }
-
-    pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
-        const ip = &mod.intern_pool;
-        switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => {
-                const struct_type = ip.loadStructType(ty.toIntern());
-                if (struct_type.fieldIsComptime(ip, index)) {
-                    assert(struct_type.haveFieldInits(ip));
-                    return Value.fromInterned(struct_type.field_inits.get(ip)[index]);
-                } else {
-                    return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod);
-                }
-            },
-            .anon_struct_type => |tuple| {
-                const val = tuple.values.get(ip)[index];
-                if (val == .none) {
-                    return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod);
-                } else {
-                    return Value.fromInterned(val);
-                }
-            },
-            else => unreachable,
-        }
-    }
-
-    pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index),
-            .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
-            else => unreachable,
-        };
-    }
-
-    pub const FieldOffset = struct {
-        field: usize,
-        offset: u64,
-    };
-
-    /// Supports structs and unions.
-    pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
-        const ip = &mod.intern_pool;
-        switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => {
-                const struct_type = ip.loadStructType(ty.toIntern());
-                assert(struct_type.haveLayout(ip));
-                assert(struct_type.layout != .@"packed");
-                return struct_type.offsets.get(ip)[index];
-            },
-
-            .anon_struct_type => |tuple| {
-                var offset: u64 = 0;
-                var big_align: Alignment = .none;
-
-                for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
-                    if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) {
-                        // comptime field
-                        if (i == index) return offset;
-                        continue;
-                    }
-
-                    const field_align = Type.fromInterned(field_ty).abiAlignment(mod);
-                    big_align = big_align.max(field_align);
-                    offset = field_align.forward(offset);
-                    if (i == index) return offset;
-                    offset += Type.fromInterned(field_ty).abiSize(mod);
-                }
-                offset = big_align.max(.@"1").forward(offset);
-                return offset;
-            },
-
-            .union_type => {
-                const union_type = ip.loadUnionType(ty.toIntern());
-                if (!union_type.hasTag(ip))
-                    return 0;
-                const layout = mod.getUnionLayout(union_type);
-                if (layout.tag_align.compare(.gte, layout.payload_align)) {
-                    // {Tag, Payload}
-                    return layout.payload_align.forward(layout.tag_size);
-                } else {
-                    // {Payload, Tag}
-                    return 0;
-                }
-            },
-
-            else => unreachable,
-        }
-    }
-
-    pub fn getOwnerDecl(ty: Type, mod: *Module) InternPool.DeclIndex {
-        return ty.getOwnerDeclOrNull(mod) orelse unreachable;
-    }
-
-    pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?InternPool.DeclIndex {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => ip.loadStructType(ty.toIntern()).decl.unwrap(),
-            .union_type => ip.loadUnionType(ty.toIntern()).decl,
-            .opaque_type => ip.loadOpaqueType(ty.toIntern()).decl,
-            .enum_type => ip.loadEnumType(ty.toIntern()).decl,
-            else => null,
-        };
-    }
-
-    pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Module.LazySrcLoc {
-        const ip = &zcu.intern_pool;
-        return .{
-            .base_node_inst = switch (ip.indexToKey(ty.toIntern())) {
-                .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
-                    .declared => |d| d.zir_index,
-                    .reified => |r| r.zir_index,
-                    .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
-                    .empty_struct => return null,
-                },
-                else => return null,
-            },
-            .offset = Module.LazySrcLoc.Offset.nodeOffset(0),
-        };
-    }
-
-    pub fn srcLoc(ty: Type, zcu: *Zcu) Module.LazySrcLoc {
-        return ty.srcLocOrNull(zcu).?;
-    }
-
-    pub fn isGenericPoison(ty: Type) bool {
-        return ty.toIntern() == .generic_poison_type;
-    }
-
-    pub fn isTuple(ty: Type, mod: *Module) bool {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => {
-                const struct_type = ip.loadStructType(ty.toIntern());
-                if (struct_type.layout == .@"packed") return false;
-                if (struct_type.decl == .none) return false;
-                return struct_type.flagsPtr(ip).is_tuple;
-            },
-            .anon_struct_type => |anon_struct| anon_struct.names.len == 0,
-            else => false,
-        };
-    }
-
-    pub fn isAnonStruct(ty: Type, mod: *Module) bool {
-        if (ty.toIntern() == .empty_struct_type) return true;
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0,
-            else => false,
-        };
-    }
-
-    pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
-        const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => {
-                const struct_type = ip.loadStructType(ty.toIntern());
-                if (struct_type.layout == .@"packed") return false;
-                if (struct_type.decl == .none) return false;
-                return struct_type.flagsPtr(ip).is_tuple;
-            },
-            .anon_struct_type => true,
-            else => false,
-        };
-    }
-
-    pub fn isSimpleTuple(ty: Type, mod: *Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0,
-            else => false,
-        };
-    }
-
-    pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .anon_struct_type => true,
-            else => false,
-        };
-    }
-
-    /// Traverses optional child types and error union payloads until the type
-    /// is not a pointer. For `E!?u32`, returns `u32`; for `*u8`, returns `*u8`.
-    pub fn optEuBaseType(ty: Type, mod: *Module) Type {
-        var cur = ty;
-        while (true) switch (cur.zigTypeTag(mod)) {
-            .Optional => cur = cur.optionalChild(mod),
-            .ErrorUnion => cur = cur.errorUnionPayload(mod),
-            else => return cur,
-        };
-    }
-
-    pub fn toUnsigned(ty: Type, mod: *Module) !Type {
-        return switch (ty.zigTypeTag(mod)) {
-            .Int => mod.intType(.unsigned, ty.intInfo(mod).bits),
-            .Vector => try mod.vectorType(.{
-                .len = ty.vectorLen(mod),
-                .child = (try ty.childType(mod).toUnsigned(mod)).toIntern(),
-            }),
-            else => unreachable,
-        };
-    }
-
-    pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
-        const ip = &zcu.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
-            .union_type => ip.loadUnionType(ty.toIntern()).zir_index,
-            .enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
-            .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
-            else => null,
-        };
-    }
-
-    pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 {
-        const ip = &zcu.intern_pool;
-        const tracked = switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
-                .declared => |d| d.zir_index,
-                .reified => |r| r.zir_index,
-                .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
-                .empty_struct => return null,
-            },
-            else => return null,
-        };
-        const info = tracked.resolveFull(&zcu.intern_pool);
-        const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?];
-        assert(file.zir_loaded);
-        const zir = file.zir;
-        const inst = zir.instructions.get(@intFromEnum(info.inst));
-        assert(inst.tag == .extended);
-        return switch (inst.data.extended.opcode) {
-            .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
-            .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
-            .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
-            .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
-            .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
-            else => unreachable,
-        };
-    }
-
-    /// Given a namespace type, returns its list of captured values.
-    pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
-        const ip = &zcu.intern_pool;
-        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => ip.loadStructType(ty.toIntern()).captures,
-            .union_type => ip.loadUnionType(ty.toIntern()).captures,
-            .enum_type => ip.loadEnumType(ty.toIntern()).captures,
-            .opaque_type => ip.loadOpaqueType(ty.toIntern()).captures,
-            else => unreachable,
-        };
-    }
-
-    pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
-        var cur_ty: Type = ty;
-        var cur_len: u64 = 1;
-        while (cur_ty.zigTypeTag(zcu) == .Array) {
-            cur_len *= cur_ty.arrayLenIncludingSentinel(zcu);
-            cur_ty = cur_ty.childType(zcu);
-        }
-        return .{ cur_ty, cur_len };
-    }
-
-    pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) {
-        /// The result is a bit-pointer with the same value and a new packed offset.
-        bit_ptr: InternPool.Key.PtrType.PackedOffset,
-        /// The result is a standard pointer.
-        byte_ptr: struct {
-            /// The byte offset of the field pointer from the parent pointer value.
-            offset: u64,
-            /// The alignment of the field pointer type.
-            alignment: InternPool.Alignment,
-        },
-    } {
-        comptime assert(Type.packed_struct_layout_version == 2);
-
-        const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
-        const field_ty = struct_ty.structFieldType(field_idx, zcu);
-
-        var bit_offset: u16 = 0;
-        var running_bits: u16 = 0;
-        for (0..struct_ty.structFieldCount(zcu)) |i| {
-            const f_ty = struct_ty.structFieldType(i, zcu);
-            if (i == field_idx) {
-                bit_offset = running_bits;
-            }
-            running_bits += @intCast(f_ty.bitSize(zcu));
-        }
-
-        const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0)
-            .{ parent_ptr_info.packed_offset.host_size, parent_ptr_info.packed_offset.bit_offset + bit_offset }
-        else
-            .{ (running_bits + 7) / 8, bit_offset };
-
-        // If the field happens to be byte-aligned, simplify the pointer type.
-        // We can only do this if the pointee's bit size matches its ABI byte size,
-        // so that loads and stores do not interfere with surrounding packed bits.
-        //
-        // TODO: we do not attempt this with big-endian targets yet because of nested
-        // structs and floats. I need to double-check the desired behavior for big endian
-        // targets before adding the necessary complications to this code. This will not
-        // cause miscompilations; it only means the field pointer uses bit masking when it
-        // might not be strictly necessary.
-        if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
-            const byte_offset = res_bit_offset / 8;
-            const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?));
-            return .{ .byte_ptr = .{
-                .offset = byte_offset,
-                .alignment = new_align,
-            } };
-        }
-
-        return .{ .bit_ptr = .{
-            .host_size = res_host_size,
-            .bit_offset = res_bit_offset,
-        } };
-    }
-
-    pub const @"u1": Type = .{ .ip_index = .u1_type };
-    pub const @"u8": Type = .{ .ip_index = .u8_type };
-    pub const @"u16": Type = .{ .ip_index = .u16_type };
-    pub const @"u29": Type = .{ .ip_index = .u29_type };
-    pub const @"u32": Type = .{ .ip_index = .u32_type };
-    pub const @"u64": Type = .{ .ip_index = .u64_type };
-    pub const @"u128": Type = .{ .ip_index = .u128_type };
-
-    pub const @"i8": Type = .{ .ip_index = .i8_type };
-    pub const @"i16": Type = .{ .ip_index = .i16_type };
-    pub const @"i32": Type = .{ .ip_index = .i32_type };
-    pub const @"i64": Type = .{ .ip_index = .i64_type };
-    pub const @"i128": Type = .{ .ip_index = .i128_type };
-
-    pub const @"f16": Type = .{ .ip_index = .f16_type };
-    pub const @"f32": Type = .{ .ip_index = .f32_type };
-    pub const @"f64": Type = .{ .ip_index = .f64_type };
-    pub const @"f80": Type = .{ .ip_index = .f80_type };
-    pub const @"f128": Type = .{ .ip_index = .f128_type };
-
-    pub const @"bool": Type = .{ .ip_index = .bool_type };
-    pub const @"usize": Type = .{ .ip_index = .usize_type };
-    pub const @"isize": Type = .{ .ip_index = .isize_type };
-    pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type };
-    pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type };
-    pub const @"void": Type = .{ .ip_index = .void_type };
-    pub const @"type": Type = .{ .ip_index = .type_type };
-    pub const @"anyerror": Type = .{ .ip_index = .anyerror_type };
-    pub const @"anyopaque": Type = .{ .ip_index = .anyopaque_type };
-    pub const @"anyframe": Type = .{ .ip_index = .anyframe_type };
-    pub const @"null": Type = .{ .ip_index = .null_type };
-    pub const @"undefined": Type = .{ .ip_index = .undefined_type };
-    pub const @"noreturn": Type = .{ .ip_index = .noreturn_type };
-
-    pub const @"c_char": Type = .{ .ip_index = .c_char_type };
-    pub const @"c_short": Type = .{ .ip_index = .c_short_type };
-    pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type };
-    pub const @"c_int": Type = .{ .ip_index = .c_int_type };
-    pub const @"c_uint": Type = .{ .ip_index = .c_uint_type };
-    pub const @"c_long": Type = .{ .ip_index = .c_long_type };
-    pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type };
-    pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type };
-    pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type };
-    pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type };
-
-    pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type };
-    pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type };
-    pub const single_const_pointer_to_comptime_int: Type = .{
-        .ip_index = .single_const_pointer_to_comptime_int_type,
-    };
-    pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };
-    pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type };
-
-    pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };
-
-    pub fn smallestUnsignedBits(max: u64) u16 {
-        if (max == 0) return 0;
-        const base = std.math.log2(max);
-        const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1;
-        return @as(u16, @intCast(base + @intFromBool(upper < max)));
-    }
-
-    /// This is only used for comptime asserts. Bump this number when you make a change
-    /// to packed struct layout to find out all the places in the codebase you need to edit!
-    pub const packed_struct_layout_version = 2;
-};
-
-fn cTypeAlign(target: Target, c_type: Target.CType) Alignment {
-    return Alignment.fromByteUnits(target.c_type_alignment(c_type));
-}
--
cgit v1.2.3


From 0e5335aaf5e0ac646fbd46a319710019d10c2971 Mon Sep 17 00:00:00 2001
From: mlugg
Date: Thu, 4 Jul 2024 05:00:32 +0100
Subject: compiler: rework type resolution, fully resolve all types

I'm so sorry.

This commit was just meant to be making all types fully resolve by
queueing resolution at the moment of their creation. Unfortunately, a
lot of dominoes ended up falling. Here's what happened:

* I added a work queue job to fully resolve a type.
* I realised that from here we could eliminate `Sema.types_to_resolve`
  if we made function codegen a separate job. This is desirable for
  simplicity of both spec and implementation.
* This led to a new AIR traversal to detect whether any required type
  is unresolved. If a type in the AIR failed to resolve, then we can't
  run codegen.
* Because full type resolution now occurs by the work queue job, a bug
  was exposed whereby error messages for type resolution were
  associated with the wrong `Decl`, resulting in duplicate error
  messages when the type was also resolved "by" its owner `Decl`
  (which really *all* resolution should be done on).
* A correct fix for this requires using a different `Sema` when
  performing type resolution: we need a `Sema` owned by the type.
  Also note that this fix is necessary for incremental compilation.
* This means a whole bunch of functions no longer need to take `Sema`s.
  * First-order effects: `resolveTypeFields`, `resolveTypeLayout`, etc
  * Second-order effects: `Type.abiAlignmentAdvanced`,
    `Value.orderAgainstZeroAdvanced`, etc

The end result of this is, in short, a more correct compiler and a
simpler language specification. This regressed a few error notes in
the test cases, but nothing that seems worth blocking this change.

Oh, also, I ripped out the old code in `test/src/Cases.zig` which
introduced a dependency on `Compilation`. This dependency was
problematic at best, and this code has been unused for a while. When
we re-enable incremental test cases, we must rewrite their executor
to use the compiler server protocol.
---
 build.zig                                          |   28 +-
 src/Air.zig                                        |    2 +
 src/Air/types_resolved.zig                         |  521 +++++++
 src/Compilation.zig                                |   34 +-
 src/Sema.zig                                       | 1458 ++++++--------------
 src/Sema/bitcast.zig                               |    8 +-
 src/Type.zig                                       |  624 +++++++--
 src/Value.zig                                      |  306 ++--
 src/Zcu.zig                                        |  164 ++-
 src/codegen/llvm.zig                               |    5 +-
 src/print_value.zig                                |    8 +-
 test/cases/compile_errors/direct_struct_loop.zig   |    1 -
 test/cases/compile_errors/indirect_struct_loop.zig |    3 -
 ..._for_an_invalid_struct_that_contains_itself.zig |    1 -
 ...e_for_an_invalid_union_that_contains_itself.zig |    1 -
 .../invalid_dependency_on_struct_size.zig          |    1 -
 ...struct_depends_on_itself_via_optional_field.zig |    2 -
 ...uct_type_returned_from_non-generic_function.zig |    2 +-
 test/src/Cases.zig                                 |  703 +---------
 test/tests.zig                                     |    4 -
 20 files changed, 1850 insertions(+), 2026 deletions(-)
 create mode 100644 src/Air/types_resolved.zig

(limited to 'src/codegen')

diff --git a/build.zig b/build.zig
index 3898acc6ac..0f0d7d4d67 100644
--- a/build.zig
+++ b/build.zig
@@ -82,15 +82,6 @@ pub fn build(b: *std.Build) !void {
     docs_step.dependOn(langref_step);
     docs_step.dependOn(std_docs_step);
 
-    const check_case_exe = b.addExecutable(.{
-        .name = "check-case",
-        .root_source_file = b.path("test/src/Cases.zig"),
-        .target = b.graph.host,
-        .optimize = optimize,
-        .single_threaded = single_threaded,
-    });
-    check_case_exe.stack_size = stack_size;
-
     const skip_debug = b.option(bool, "skip-debug", "Main test suite skips debug builds") orelse false;
     const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
     const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;
@@ -222,7 +213,6 @@ pub fn build(b: *std.Build) !void {
     if (target.result.os.tag == .windows and target.result.abi == .gnu) {
         // LTO is currently broken on mingw, this can be removed when it's fixed.
         exe.want_lto = false;
-        check_case_exe.want_lto = false;
     }
 
     const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend");
@@ -245,7 +235,6 @@ pub fn build(b: *std.Build) !void {
 
     if (link_libc) {
         exe.linkLibC();
-        check_case_exe.linkLibC();
     }
 
     const is_debug = optimize == .Debug;
@@ -339,21 +328,17 @@ pub fn build(b: *std.Build) !void {
         }
 
         try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
-        try addCmakeCfgOptionsToExe(b, cfg, check_case_exe, use_zig_libcxx);
     } else {
         // Here we are -Denable-llvm but no cmake integration.
        try addStaticLlvmOptionsToExe(exe);
-        try addStaticLlvmOptionsToExe(check_case_exe);
     }
     if (target.result.os.tag == .windows) {
-        inline for (.{ exe, check_case_exe }) |artifact| {
-            // LLVM depends on networking as of version 18.
-            artifact.linkSystemLibrary("ws2_32");
+        // LLVM depends on networking as of version 18.
+        exe.linkSystemLibrary("ws2_32");
 
-            artifact.linkSystemLibrary("version");
-            artifact.linkSystemLibrary("uuid");
-            artifact.linkSystemLibrary("ole32");
-        }
+        exe.linkSystemLibrary("version");
+        exe.linkSystemLibrary("uuid");
+        exe.linkSystemLibrary("ole32");
     }
 }
 
@@ -394,7 +379,6 @@ pub fn build(b: *std.Build) !void {
     const test_filters = b.option([]const []const u8, "test-filter", "Skip tests that do not match any filter") orelse &[0][]const u8{};
 
     const test_cases_options = b.addOptions();
-    check_case_exe.root_module.addOptions("build_options", test_cases_options);
 
     test_cases_options.addOption(bool, "enable_tracy", false);
     test_cases_options.addOption(bool, "enable_debug_extensions", enable_debug_extensions);
@@ -458,7 +442,7 @@ pub fn build(b: *std.Build) !void {
     test_step.dependOn(check_fmt);
 
     const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
-    try tests.addCases(b, test_cases_step, test_filters, check_case_exe, target, .{
+    try tests.addCases(b, test_cases_step, test_filters, target, .{
         .skip_translate_c = skip_translate_c,
         .skip_run_translated_c = skip_run_translated_c,
     }, .{
diff --git a/src/Air.zig b/src/Air.zig
index 0a05470e1c..5799c31b25 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -1801,3 +1801,5 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
     };
 }
+
+pub const typesFullyResolved = @import("Air/types_resolved.zig").typesFullyResolved;
diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig
new file mode 100644
index 0000000000..073f2d68d4
--- /dev/null
+++ b/src/Air/types_resolved.zig
@@ -0,0 +1,521 @@
+const Air = @import("../Air.zig");
+const Zcu = @import("../Zcu.zig");
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const InternPool = @import("../InternPool.zig");
+
+/// Given a body of AIR instructions, returns whether all type resolution necessary for codegen is complete.
+/// If `false`, then type resolution must have failed, so codegen cannot proceed.
+pub fn typesFullyResolved(air: Air, zcu: *Zcu) bool {
+    return checkBody(air, air.getMainBody(), zcu);
+}
+
+fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
+    const tags = air.instructions.items(.tag);
+    const datas = air.instructions.items(.data);
+
+    for (body) |inst| {
+        const data = datas[@intFromEnum(inst)];
+        switch (tags[@intFromEnum(inst)]) {
+            .inferred_alloc, .inferred_alloc_comptime => unreachable,
+
+            .arg => {
+                if (!checkType(data.arg.ty.toType(), zcu)) return false;
+            },
+
+            .add,
+            .add_safe,
+            .add_optimized,
+            .add_wrap,
+            .add_sat,
+            .sub,
+            .sub_safe,
+            .sub_optimized,
+            .sub_wrap,
+            .sub_sat,
+            .mul,
+            .mul_safe,
+            .mul_optimized,
+            .mul_wrap,
+            .mul_sat,
+            .div_float,
+            .div_float_optimized,
+            .div_trunc,
+            .div_trunc_optimized,
+            .div_floor,
+            .div_floor_optimized,
+            .div_exact,
+            .div_exact_optimized,
+            .rem,
+            .rem_optimized,
+            .mod,
+            .mod_optimized,
+            .max,
+            .min,
+            .bit_and,
+            .bit_or,
+            .shr,
+            .shr_exact,
+            .shl,
+            .shl_exact,
+            .shl_sat,
+            .xor,
+            .cmp_lt,
+            .cmp_lt_optimized,
+            .cmp_lte,
+            .cmp_lte_optimized,
+            .cmp_eq,
+            .cmp_eq_optimized,
+            .cmp_gte,
+            .cmp_gte_optimized,
+            .cmp_gt,
+            .cmp_gt_optimized,
+            .cmp_neq,
+            .cmp_neq_optimized,
+            .bool_and,
+            .bool_or,
+            .store,
+            .store_safe,
+            .set_union_tag,
+            .array_elem_val,
+            .slice_elem_val,
+            .ptr_elem_val,
+            .memset,
+            .memset_safe,
+            .memcpy,
+            .atomic_store_unordered,
+            .atomic_store_monotonic,
+            .atomic_store_release,
+            .atomic_store_seq_cst,
+            => {
+                if (!checkRef(data.bin_op.lhs, zcu)) return false;
+                if (!checkRef(data.bin_op.rhs, zcu)) return false;
+            },
+
+            .not,
+            .bitcast,
+            .clz,
+            .ctz,
+            .popcount,
+            .byte_swap,
+            .bit_reverse,
+            .abs,
+            .load,
+            .fptrunc,
+            .fpext,
+            .intcast,
+            .trunc,
+            .optional_payload,
+            .optional_payload_ptr,
+            .optional_payload_ptr_set,
+            .wrap_optional,
+            .unwrap_errunion_payload,
+            .unwrap_errunion_err,
+            .unwrap_errunion_payload_ptr,
+            .unwrap_errunion_err_ptr,
+            .errunion_payload_ptr_set,
+            .wrap_errunion_payload,
+            .wrap_errunion_err,
+            .struct_field_ptr_index_0,
+            .struct_field_ptr_index_1,
+            .struct_field_ptr_index_2,
+            .struct_field_ptr_index_3,
+            .get_union_tag,
+            .slice_len,
+            .slice_ptr,
+            .ptr_slice_len_ptr,
+            .ptr_slice_ptr_ptr,
+            .array_to_slice,
+            .int_from_float,
+            .int_from_float_optimized,
+            .float_from_int,
+            .splat,
+            .error_set_has_value,
+            .addrspace_cast,
+            .c_va_arg,
+            .c_va_copy,
+            => {
+                if (!checkType(data.ty_op.ty.toType(), zcu)) return false;
+                if (!checkRef(data.ty_op.operand, zcu)) return false;
+            },
+
+            .alloc,
+            .ret_ptr,
+            .c_va_start,
+            => {
+                if (!checkType(data.ty, zcu)) return false;
+            },
+
+            .ptr_add,
+            .ptr_sub,
+            .add_with_overflow,
+            .sub_with_overflow,
+            .mul_with_overflow,
+            .shl_with_overflow,
+            .slice,
+            .slice_elem_ptr,
+            .ptr_elem_ptr,
+            => {
+                const bin = air.extraData(Air.Bin, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .block,
+            .loop,
+            => {
+                const extra = air.extraData(Air.Block, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .dbg_inline_block => {
+                const extra = air.extraData(Air.DbgInlineBlock, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .sqrt,
+            .sin,
+            .cos,
+            .tan,
+            .exp,
+            .exp2,
+            .log,
+            .log2,
+            .log10,
+            .floor,
+            .ceil,
+            .round,
+            .trunc_float,
+            .neg,
+            .neg_optimized,
+            .is_null,
+            .is_non_null,
+            .is_null_ptr,
+            .is_non_null_ptr,
+            .is_err,
+            .is_non_err,
+            .is_err_ptr,
+            .is_non_err_ptr,
+            .int_from_ptr,
+            .int_from_bool,
+            .ret,
+            .ret_safe,
+            .ret_load,
+            .is_named_enum_value,
+            .tag_name,
+            .error_name,
+            .cmp_lt_errors_len,
+            .c_va_end,
+            .set_err_return_trace,
+            => {
+                if (!checkRef(data.un_op, zcu)) return false;
+            },
+
+            .br => {
+                if (!checkRef(data.br.operand, zcu)) return false;
+            },
+
+            .cmp_vector,
+            .cmp_vector_optimized,
+            => {
+                const extra = air.extraData(Air.VectorCmp, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.lhs, zcu)) return false;
+                if (!checkRef(extra.rhs, zcu)) return false;
+            },
+
+            .reduce,
+            .reduce_optimized,
+            => {
+                if (!checkRef(data.reduce.operand, zcu)) return false;
+            },
+
+            .struct_field_ptr,
+            .struct_field_val,
+            => {
+                const extra = air.extraData(Air.StructField, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.struct_operand, zcu)) return false;
+            },
+
+            .shuffle => {
+                const extra = air.extraData(Air.Shuffle, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.a, zcu)) return false;
+                if (!checkRef(extra.b, zcu)) return false;
+                if (!checkVal(Value.fromInterned(extra.mask), zcu)) return false;
+            },
+
+            .cmpxchg_weak,
+            .cmpxchg_strong,
+            => {
+                const extra = air.extraData(Air.Cmpxchg, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.ptr, zcu)) return false;
+                if (!checkRef(extra.expected_value, zcu)) return false;
+                if (!checkRef(extra.new_value, zcu)) return false;
+            },
+
+            .aggregate_init => {
+                const ty = data.ty_pl.ty.toType();
+                const elems_len: usize = @intCast(ty.arrayLen(zcu));
+                const elems: []const Air.Inst.Ref = @ptrCast(air.extra[data.ty_pl.payload..][0..elems_len]);
+                if (!checkType(ty, zcu)) return false;
+                if (ty.zigTypeTag(zcu) == .Struct) {
+                    for (elems, 0..) |elem, elem_idx| {
+                        if (ty.structFieldIsComptime(elem_idx, zcu)) continue;
+                        if (!checkRef(elem, zcu)) return false;
+                    }
+                } else {
+                    for (elems) |elem| {
+                        if (!checkRef(elem, zcu)) return false;
+                    }
+                }
+            },
+
+            .union_init => {
+                const extra = air.extraData(Air.UnionInit, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.init, zcu)) return false;
+            },
+
+            .field_parent_ptr => {
+                const extra = air.extraData(Air.FieldParentPtr, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.field_ptr, zcu)) return false;
+            },
+
+            .atomic_load => {
+                if (!checkRef(data.atomic_load.ptr, zcu)) return false;
+            },
+
+            .prefetch => {
+                if (!checkRef(data.prefetch.ptr, zcu)) return false;
+            },
+
+            .vector_store_elem => {
+                const bin = air.extraData(Air.Bin, data.vector_store_elem.payload).data;
+                if (!checkRef(data.vector_store_elem.vector_ptr, zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .select,
+            .mul_add,
+            => {
+                const bin = air.extraData(Air.Bin, data.pl_op.payload).data;
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .atomic_rmw => {
+                const extra = air.extraData(Air.AtomicRmw, data.pl_op.payload).data;
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkRef(extra.operand, zcu)) return false;
+            },
+
+            .call,
+            .call_always_tail,
+            .call_never_tail,
+            .call_never_inline,
+            => {
+                const extra = air.extraData(Air.Call, data.pl_op.payload);
+                const args: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.args_len]);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                for (args) |arg| if (!checkRef(arg, zcu)) return false;
+            },
+
+            .dbg_var_ptr,
+            .dbg_var_val,
+            => {
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+            },
+
+            .@"try" => {
+                const extra = air.extraData(Air.Try, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .try_ptr => {
+                const extra = air.extraData(Air.TryPtr, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.data.ptr, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .cond_br => {
+                const extra = air.extraData(Air.CondBr, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.then_body_len]),
+                    zcu,
+                )) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .switch_br => {
+                const extra = air.extraData(Air.SwitchBr, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                var extra_index = extra.end;
+                for (0..extra.data.cases_len) |_| {
+                    const case = air.extraData(Air.SwitchBr.Case, extra_index);
+                    extra_index = case.end;
+                    const items: []const Air.Inst.Ref = @ptrCast(air.extra[extra_index..][0..case.data.items_len]);
+                    extra_index += case.data.items_len;
+                    for (items) |item| if (!checkRef(item, zcu)) return false;
+                    if (!checkBody(
+                        air,
+                        @ptrCast(air.extra[extra_index..][0..case.data.body_len]),
+                        zcu,
+                    )) return false;
extra_index += case.data.body_len; + } + if (!checkBody( + air, + @ptrCast(air.extra[extra_index..][0..extra.data.else_body_len]), + zcu, + )) return false; + }, + + .assembly => { + const extra = air.extraData(Air.Asm, data.ty_pl.payload); + if (!checkType(data.ty_pl.ty.toType(), zcu)) return false; + // Luckily, we only care about the inputs and outputs, so we don't have to do + // the whole null-terminated string dance. + const outputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.outputs_len]); + const inputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end + extra.data.outputs_len ..][0..extra.data.inputs_len]); + for (outputs) |output| if (output != .none and !checkRef(output, zcu)) return false; + for (inputs) |input| if (input != .none and !checkRef(input, zcu)) return false; + }, + + .trap, + .breakpoint, + .ret_addr, + .frame_addr, + .unreach, + .wasm_memory_size, + .wasm_memory_grow, + .work_item_id, + .work_group_size, + .work_group_id, + .fence, + .dbg_stmt, + .err_return_trace, + .save_err_return_trace_index, + => {}, + } + } + return true; +} + +fn checkRef(ref: Air.Inst.Ref, zcu: *Zcu) bool { + const ip_index = ref.toInterned() orelse { + // This operand refers back to a previous instruction. + // We have already checked that instruction's type. + // So, there's no need to check this operand's type. + return true; + }; + return checkVal(Value.fromInterned(ip_index), zcu); +} + +fn checkVal(val: Value, zcu: *Zcu) bool { + if (!checkType(val.typeOf(zcu), zcu)) return false; + // Check for lazy values + switch (zcu.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => return true, + .lazy_align, .lazy_size => |ty_index| { + return checkType(Type.fromInterned(ty_index), zcu); + }, + }, + else => return true, + } +} + +fn checkType(ty: Type, zcu: *Zcu) bool { + const ip = &zcu.intern_pool; + return switch (ty.zigTypeTag(zcu)) { + .Type, + .Void, + .Bool, + .NoReturn, + .Int, + .Float, + .ErrorSet, + .Enum, + .Opaque, + .Vector, + // These types can appear due to some dummy instructions Sema introduces and expects to be omitted by Liveness. + // It's a little silly -- but fine, we'll return `true`. 
+        .ComptimeFloat,
+        .ComptimeInt,
+        .Undefined,
+        .Null,
+        .EnumLiteral,
+        => true,
+
+        .Frame,
+        .AnyFrame,
+        => @panic("TODO Air.types_resolved.checkType async frames"),
+
+        .Optional => checkType(ty.childType(zcu), zcu),
+        .ErrorUnion => checkType(ty.errorUnionPayload(zcu), zcu),
+        .Pointer => checkType(ty.childType(zcu), zcu),
+        .Array => checkType(ty.childType(zcu), zcu),
+
+        .Fn => {
+            const info = zcu.typeToFunc(ty).?;
+            for (0..info.param_types.len) |i| {
+                const param_ty = info.param_types.get(ip)[i];
+                if (!checkType(Type.fromInterned(param_ty), zcu)) return false;
+            }
+            return checkType(Type.fromInterned(info.return_type), zcu);
+        },
+        .Struct => switch (ip.indexToKey(ty.toIntern())) {
+            .struct_type => {
+                const struct_obj = zcu.typeToStruct(ty).?;
+                return switch (struct_obj.layout) {
+                    .@"packed" => struct_obj.backingIntType(ip).* != .none,
+                    .auto, .@"extern" => struct_obj.flagsPtr(ip).fully_resolved,
+                };
+            },
+            .anon_struct_type => |tuple| {
+                for (0..tuple.types.len) |i| {
+                    const field_is_comptime = tuple.values.get(ip)[i] != .none;
+                    if (field_is_comptime) continue;
+                    const field_ty = tuple.types.get(ip)[i];
+                    if (!checkType(Type.fromInterned(field_ty), zcu)) return false;
+                }
+                return true;
+            },
+            else => unreachable,
+        },
+        .Union => return zcu.typeToUnion(ty).?.flagsPtr(ip).status == .fully_resolved,
+    };
+}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index b964ffd0d1..7447d589fd 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -37,6 +37,7 @@ const Cache = std.Build.Cache;
 const c_codegen = @import("codegen/c.zig");
 const libtsan = @import("libtsan.zig");
 const Zir = std.zig.Zir;
+const Air = @import("Air.zig");
 const Builtin = @import("Builtin.zig");
 const LlvmObject = @import("codegen/llvm.zig").Object;
@@ -316,18 +317,29 @@ const Job = union(enum) {
     codegen_decl: InternPool.DeclIndex,
     /// Write the machine code for a function to the output file.
     /// This will either be a non-generic `func_decl` or a `func_instance`.
-    codegen_func: InternPool.Index,
+    codegen_func: struct {
+        func: InternPool.Index,
+        /// This `Air` is owned by the `Job` and allocated with `gpa`.
+        /// It must be deinited when the job is processed.
+        air: Air,
+    },
     /// Render the .h file snippet for the Decl.
     emit_h_decl: InternPool.DeclIndex,
     /// The Decl needs to be analyzed and possibly export itself.
     /// It may have already been analyzed, or it may have been determined
     /// to be outdated; in this case perform semantic analysis again.
     analyze_decl: InternPool.DeclIndex,
+    /// Analyze the body of a runtime function.
+    /// After analysis, a `codegen_func` job will be queued.
+    /// These must be separate jobs to ensure any needed type resolution occurs *before* codegen.
+    analyze_func: InternPool.Index,
     /// The source file containing the Decl has been updated, and so the
     /// Decl may need its line number information updated in the debug info.
     update_line_number: InternPool.DeclIndex,
     /// The main source file for the module needs to be analyzed.
     analyze_mod: *Package.Module,
+    /// Fully resolve the given `struct` or `union` type.
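+    /// Queued by Sema whenever a new `struct` or `union` type is created, so
+    /// that its layout is ready before any `codegen_func` job can reference it.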
+ resolve_type_fully: InternPool.Index, /// one of the glibc static objects glibc_crt_file: glibc.CRTFile, @@ -3389,7 +3401,7 @@ pub fn performAllTheWork( if (try zcu.findOutdatedToAnalyze()) |outdated| { switch (outdated.unwrap()) { .decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }), - .func => |func| try comp.work_queue.writeItem(.{ .codegen_func = func }), + .func => |func| try comp.work_queue.writeItem(.{ .analyze_func = func }), } continue; } @@ -3439,6 +3451,14 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo const named_frame = tracy.namedFrame("codegen_func"); defer named_frame.end(); + const module = comp.module.?; + // This call takes ownership of `func.air`. + try module.linkerUpdateFunc(func.func, func.air); + }, + .analyze_func => |func| { + const named_frame = tracy.namedFrame("analyze_func"); + defer named_frame.end(); + const module = comp.module.?; module.ensureFuncBodyAnalyzed(func) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, @@ -3518,6 +3538,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern()); } }, + .resolve_type_fully => |ty| { + const named_frame = tracy.namedFrame("resolve_type_fully"); + defer named_frame.end(); + + const zcu = comp.module.?; + Type.fromInterned(ty).resolveFully(zcu) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => return, + }; + }, .update_line_number => |decl_index| { const named_frame = tracy.namedFrame("update_line_number"); defer named_frame.end(); diff --git a/src/Sema.zig b/src/Sema.zig index 57b2c897a1..9dfbc724eb 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -64,14 +64,6 @@ generic_owner: InternPool.Index = .none, /// instantiation can point back to the instantiation site in addition to the /// declaration site. generic_call_src: LazySrcLoc = LazySrcLoc.unneeded, -/// The key is types that must be fully resolved prior to machine code -/// generation pass. Types are added to this set when resolving them -/// immediately could cause a dependency loop, but they do need to be resolved -/// before machine code generation passes process the AIR. -/// It would work fine if this were an array list instead of an array hash map. -/// I chose array hash map with the intention to save time by omitting -/// duplicates. -types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, /// These are lazily created runtime blocks from block_inline instructions. 
 /// They are created when a break_inline passes through a runtime condition, because
 /// Sema must convert comptime control flow to runtime control flow, which means
@@ -872,7 +864,6 @@ pub fn deinit(sema: *Sema) void {
     sema.air_extra.deinit(gpa);
     sema.inst_map.deinit(gpa);
     sema.decl_val_table.deinit(gpa);
-    sema.types_to_resolve.deinit(gpa);
     {
         var it = sema.post_hoc_blocks.iterator();
         while (it.next()) |entry| {
@@ -2078,8 +2069,8 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
     const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty));

     // var st: StackTrace = undefined;
-    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-    try sema.resolveTypeFields(stack_trace_ty);
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty));

     // st.instruction_addresses = &addrs;
@@ -2628,7 +2619,7 @@ fn analyzeAsInt(
     const mod = sema.mod;
     const coerced = try sema.coerce(block, dest_ty, air_ref, src);
     const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
-    return (try val.getUnsignedIntAdvanced(mod, sema)).?;
+    return (try val.getUnsignedIntAdvanced(mod, .sema)).?;
 }

 /// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`,
@@ -2832,6 +2823,7 @@ fn zirStructDecl(
     }
     try mod.finalizeAnonDecl(new_decl_index);

+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
 }
@@ -3332,7 +3324,7 @@ fn zirUnionDecl(
     }
     try mod.finalizeAnonDecl(new_decl_index);
-
+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
 }
@@ -3457,12 +3449,12 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     defer tracy.end();

     if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) {
-        try sema.resolveTypeFields(sema.fn_ret_ty);
+        try sema.fn_ret_ty.resolveFields(sema.mod);
         return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none);
     }

     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = sema.fn_ret_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
@@ -3471,7 +3463,6 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
         // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
         // TODO when functions gain result location support, the inlining struct in
         // Block should contain the return pointer, and we would pass that through here.
- try sema.queueFullTypeResolution(sema.fn_ret_ty); return block.addTy(.alloc, ptr_type); } @@ -3667,8 +3658,8 @@ fn zirAllocExtended( try sema.validateVarType(block, ty_src, var_ty, false); } const target = sema.mod.getTarget(); - try sema.resolveTypeLayout(var_ty); - const ptr_type = try sema.ptrType(.{ + try var_ty.resolveLayout(sema.mod); + const ptr_type = try sema.mod.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .alignment = alignment, @@ -3902,7 +3893,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, const idx_val = (try sema.resolveValue(data.rhs)).?; break :blk .{ data.lhs, - .{ .elem = try idx_val.toUnsignedIntAdvanced(sema) }, + .{ .elem = try idx_val.toUnsignedIntSema(zcu) }, }; }, .bitcast => .{ @@ -3940,7 +3931,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, .val = payload_val.toIntern(), } }); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(opt_val), opt_ty); - break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(sema)).toIntern(); + break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(zcu)).toIntern(); }, .eu_payload => ptr: { // Set the error union to non-error at comptime. @@ -3953,7 +3944,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, .val = .{ .payload = payload_val.toIntern() }, } }); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(eu_val), eu_ty); - break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(sema)).toIntern(); + break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(zcu)).toIntern(); }, .field => |idx| ptr: { const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu); @@ -3967,9 +3958,9 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, const store_val = try zcu.unionValue(maybe_union_ty, tag_val, payload_val); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty); } - break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, sema)).toIntern(); + break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, zcu)).toIntern(); }, - .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, sema)).toIntern(), + .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, zcu)).toIntern(), }; try ptr_mapping.put(air_ptr, new_ptr); } @@ -4060,7 +4051,7 @@ fn finishResolveComptimeKnownAllocPtr( fn makePtrTyConst(sema: *Sema, ptr_ty: Type) CompileError!Type { var ptr_info = ptr_ty.ptrInfo(sema.mod); ptr_info.flags.is_const = true; - return sema.ptrType(ptr_info); + return sema.mod.ptrTypeSema(ptr_info); } fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref { @@ -4103,11 +4094,10 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I return sema.analyzeComptimeAlloc(block, var_ty, .none); } const target = sema.mod.getTarget(); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try sema.mod.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); - try sema.queueFullTypeResolution(var_ty); const ptr = try block.addTy(.alloc, ptr_type); const ptr_inst = ptr.toIndex().?; try sema.maybe_comptime_allocs.put(sema.gpa, ptr_inst, .{ .runtime_index = block.runtime_index }); @@ -4127,11 +4117,10 @@ fn 
zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } try sema.validateVarType(block, ty_src, var_ty, false); const target = sema.mod.getTarget(); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try sema.mod.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); - try sema.queueFullTypeResolution(var_ty); return block.addTy(.alloc, ptr_type); } @@ -4227,7 +4216,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_vals, .none); - const final_ptr_ty = try sema.ptrType(.{ + const final_ptr_ty = try mod.ptrTypeSema(.{ .child = final_elem_ty.toIntern(), .flags = .{ .alignment = ia1.alignment, @@ -4247,7 +4236,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Unless the block is comptime, `alloc_inferred` always produces // a runtime constant. The final inferred type needs to be // fully resolved so it can be lowered in codegen. - try sema.resolveTypeFully(final_elem_ty); + try final_elem_ty.resolveFully(mod); return; } @@ -4259,8 +4248,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(mod)}); } - try sema.queueFullTypeResolution(final_elem_ty); - // Change it to a normal alloc. sema.air_instructions.set(@intFromEnum(ptr_inst), .{ .tag = .alloc, @@ -4633,7 +4620,7 @@ fn validateArrayInitTy( return; }, .Struct => if (ty.isTuple(mod)) { - try sema.resolveTypeFields(ty); + try ty.resolveFields(mod); const array_len = ty.arrayLen(mod); if (init_count > array_len) { return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{ @@ -4911,7 +4898,7 @@ fn validateStructInit( if (block.is_comptime and (try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null) { - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveLayout(mod); // In this case the only thing we need to do is evaluate the implicit // store instructions for default field values, and report any missing fields. // Avoid the cost of the extra machinery for detecting a comptime struct init value. @@ -4919,7 +4906,7 @@ fn validateStructInit( const i: u32 = @intCast(i_usize); if (field_ptr != .none) continue; - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.toIntern() == .unreachable_value) { const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse { @@ -4968,7 +4955,7 @@ fn validateStructInit( const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); // We collect the comptime field values in case the struct initialization // ends up being comptime-known. @@ -5127,7 +5114,7 @@ fn validateStructInit( try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store); return; } - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveLayout(mod); // Our task is to insert `store` instructions for all the default field values. for (found_fields, 0..) 
|field_ptr, i| { @@ -5172,7 +5159,7 @@ fn zirValidatePtrArrayInit( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - try sema.resolveStructFieldInits(array_ty); + try array_ty.resolveStructFieldInits(mod); var i = instrs.len; while (i < array_len) : (i += 1) { const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern(); @@ -5241,7 +5228,7 @@ fn zirValidatePtrArrayInit( if (array_ty.isTuple(mod)) { if (array_ty.structFieldIsComptime(i, mod)) - try sema.resolveStructFieldInits(array_ty); + try array_ty.resolveStructFieldInits(mod); if (try array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv.toIntern(); continue; @@ -5581,7 +5568,7 @@ fn storeToInferredAllocComptime( .needed_comptime_reason = "value being stored to a comptime variable must be comptime-known", }); }; - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try zcu.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .alignment = iac.alignment, @@ -5688,7 +5675,7 @@ fn anonDeclRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref { fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index { const mod = sema.mod; - const ptr_ty = (try sema.ptrType(.{ + const ptr_ty = (try mod.ptrTypeSema(.{ .child = mod.intern_pool.typeOf(val), .flags = .{ .alignment = .none, @@ -6645,8 +6632,6 @@ fn addDbgVar( // real `block` instruction. if (block.need_debug_scope) |ptr| ptr.* = true; - try sema.queueFullTypeResolution(operand_ty); - // Add the name to the AIR. const name_extra_index: u32 = @intCast(sema.air_extra.items.len); const elements_used = name.len / 4 + 1; @@ -6832,14 +6817,8 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref if (!block.ownerModule().error_tracing) return .none; - const stack_trace_ty = sema.getBuiltinType("StackTrace") catch |err| switch (err) { - error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, - else => |e| return e, - }; - sema.resolveTypeFields(stack_trace_ty) catch |err| switch (err) { - error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, - else => |e| return e, - }; + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"), @@ -6879,8 +6858,8 @@ fn popErrorReturnTrace( // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or // the result is comptime-known to be a non-error. Either way, pop unconditionally. - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); @@ -6905,8 +6884,8 @@ fn popErrorReturnTrace( defer then_block.instructions.deinit(gpa); // If non-error, then pop the error return trace by restoring the index. 
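        // Restoring the saved index discards any frames recorded after the save point.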
- const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); @@ -7032,8 +7011,8 @@ fn zirCall( // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only // need to clean-up our own trace if we were passed to a non-error-handling expression. if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) { - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls); const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); @@ -7264,10 +7243,6 @@ const CallArgsInfo = union(enum) { ) CompileError!Air.Inst.Ref { const mod = sema.mod; const param_count = func_ty_info.param_types.len; - if (maybe_param_ty) |param_ty| switch (param_ty.toIntern()) { - .generic_poison_type => {}, - else => try sema.queueFullTypeResolution(param_ty), - }; const uncoerced_arg: Air.Inst.Ref = switch (cai) { inline .resolved, .call_builtin => |resolved| resolved.args[arg_index], .zir_call => |zir_call| arg_val: { @@ -7494,24 +7469,19 @@ fn analyzeCall( const gpa = sema.gpa; - var is_generic_call = func_ty_info.is_generic; + const is_generic_call = func_ty_info.is_generic; var is_comptime_call = block.is_comptime or modifier == .compile_time; var is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .Inline; var comptime_reason: ?*const Block.ComptimeReason = null; if (!is_inline_call and !is_comptime_call) { - if (sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) |ct| { - is_comptime_call = ct; - is_inline_call = ct; - if (ct) { - comptime_reason = &.{ .comptime_ret_ty = .{ - .func = func, - .func_src = func_src, - .return_ty = Type.fromInterned(func_ty_info.return_type), - } }; - } - } else |err| switch (err) { - error.GenericPoison => is_generic_call = true, - else => |e| return e, + if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) { + is_comptime_call = true; + is_inline_call = true; + comptime_reason = &.{ .comptime_ret_ty = .{ + .func = func, + .func_src = func_src, + .return_ty = Type.fromInterned(func_ty_info.return_type), + } }; } } @@ -7871,7 +7841,6 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type)); if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(mod)) { ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true; } @@ -8281,7 +8250,6 @@ fn instantiateGenericCall( } } else { // The parameter is runtime-known. 
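            // A runtime-known argument lowers to an `.arg` instruction in the
            // instantiated function body.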
- try sema.queueFullTypeResolution(arg_ty); child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{ .tag = .arg, .data = .{ .arg = .{ @@ -8330,8 +8298,6 @@ fn instantiateGenericCall( return error.GenericPoison; } - try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type)); - if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); if (sema.owner_func_index != .none and @@ -8423,7 +8389,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil else => |e| return e, }; const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod); - try sema.resolveTypeFields(indexable_ty); + try indexable_ty.resolveFields(mod); assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction if (indexable_ty.zigTypeTag(mod) == .Struct) { const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod); @@ -8687,7 +8653,7 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const operand = try sema.coerce(block, err_int_ty, uncasted_operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { - const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntAdvanced(sema)); + const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(mod)); if (int > mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); return Air.internedToRef((try mod.intern(.{ .err = .{ @@ -8791,7 +8757,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) { .Enum => operand, .Union => blk: { - try sema.resolveTypeFields(operand_ty); + try operand_ty.resolveFields(mod); const tag_ty = operand_ty.unionTagType(mod) orelse { return sema.fail( block, @@ -8933,7 +8899,7 @@ fn analyzeOptionalPayloadPtr( } const child_type = opt_type.optionalChild(zcu); - const child_pointer = try sema.ptrType(.{ + const child_pointer = try zcu.ptrTypeSema(.{ .child = child_type.toIntern(), .flags = .{ .is_const = optional_ptr_ty.isConstPtr(zcu), @@ -8957,13 +8923,13 @@ fn analyzeOptionalPayloadPtr( const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr); } - return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern()); + return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { if (val.isNull(zcu)) { return sema.fail(block, src, "unable to unwrap null", .{}); } - return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern()); + return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern()); } } @@ -9006,7 +8972,7 @@ fn zirOptionalPayload( // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; const ptr_info = operand_ty.ptrInfo(mod); - break :t try sema.ptrType(.{ + break :t try mod.ptrTypeSema(.{ .child = ptr_info.child, .flags = .{ .alignment = ptr_info.flags.alignment, @@ -9124,7 +9090,7 @@ fn analyzeErrUnionPayloadPtr( const err_union_ty = operand_ty.childType(zcu); const payload_ty = err_union_ty.errorUnionPayload(zcu); - const operand_pointer_ty = try sema.ptrType(.{ + const operand_pointer_ty = try zcu.ptrTypeSema(.{ .child = payload_ty.toIntern(), .flags = .{ .is_const = operand_ty.isConstPtr(zcu), @@ -9149,13 +9115,13 @@ fn 
analyzeErrUnionPayloadPtr( const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr); } - return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern()); + return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { if (val.getErrorName(zcu).unwrap()) |name| { return sema.failWithComptimeErrorRetTrace(block, src, name); } - return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern()); + return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern()); } } @@ -9603,17 +9569,8 @@ fn funcCommon( } } - var ret_ty_requires_comptime = false; - const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: { - ret_ty_requires_comptime = ret_comptime; - break :rp bare_return_type.isGenericPoison(); - } else |err| switch (err) { - error.GenericPoison => rp: { - is_generic = true; - break :rp true; - }, - else => |e| return e, - }; + const ret_ty_requires_comptime = try sema.typeRequiresComptime(bare_return_type); + const ret_poison = bare_return_type.isGenericPoison(); const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime; const param_types = block.params.items(.ty); @@ -9961,8 +9918,8 @@ fn finishFunc( if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) { // Make sure that StackTrace's fields are resolved so that the backend can // lower this fn type. - const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(unresolved_stack_trace_ty); + const unresolved_stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try unresolved_stack_trace_ty.resolveFields(mod); } return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty); @@ -10021,21 +9978,7 @@ fn zirParam( } }; - const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) { - error.GenericPoison => { - // The type is not available until the generic instantiation. - // We result the param instruction with a poison value and - // insert an anytype parameter. - try block.params.append(sema.arena, .{ - .ty = .generic_poison_type, - .is_comptime = comptime_syntax, - .name = param_name, - }); - sema.inst_map.putAssumeCapacity(inst, .generic_poison); - return; - }, - else => |e| return e, - } or comptime_syntax; + const is_comptime = try sema.typeRequiresComptime(param_ty) or comptime_syntax; try block.params.append(sema.arena, .{ .ty = param_ty.toIntern(), @@ -10162,7 +10105,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! } return Air.internedToRef((try zcu.intValue( Type.usize, - (try operand_val.getUnsignedIntAdvanced(zcu, sema)).?, + (try operand_val.getUnsignedIntAdvanced(zcu, .sema)).?, )).toIntern()); } const len = operand_ty.vectorLen(zcu); @@ -10174,7 +10117,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! new_elem.* = (try zcu.undefValue(Type.usize)).toIntern(); continue; } - const addr = try ptr_val.getUnsignedIntAdvanced(zcu, sema) orelse { + const addr = try ptr_val.getUnsignedIntAdvanced(zcu, .sema) orelse { // A vector element wasn't an integer pointer. This is a runtime operation. 
break :ct; }; @@ -11047,7 +10990,7 @@ const SwitchProngAnalysis = struct { const union_obj = zcu.typeToUnion(operand_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); if (capture_byref) { - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try zcu.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !operand_ptr_ty.ptrIsMutable(zcu), @@ -11056,7 +10999,7 @@ const SwitchProngAnalysis = struct { }, }); if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| { - return Air.internedToRef((try union_ptr.ptrField(field_index, sema)).toIntern()); + return Air.internedToRef((try union_ptr.ptrField(field_index, zcu)).toIntern()); } return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty); } else { @@ -11150,7 +11093,7 @@ const SwitchProngAnalysis = struct { const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len); for (field_indices, dummy_captures) |field_idx, *dummy| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); - const field_ptr_ty = try sema.ptrType(.{ + const field_ptr_ty = try zcu.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = operand_ptr_info.flags.is_const, @@ -11186,7 +11129,7 @@ const SwitchProngAnalysis = struct { if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| { if (op_ptr_val.isUndef(zcu)) return zcu.undefRef(capture_ptr_ty); - const field_ptr_val = try op_ptr_val.ptrField(first_field_index, sema); + const field_ptr_val = try op_ptr_val.ptrField(first_field_index, zcu); return Air.internedToRef((try zcu.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern()); } @@ -11399,7 +11342,7 @@ fn switchCond( }, .Union => { - try sema.resolveTypeFields(operand_ty); + try operand_ty.resolveFields(mod); const enum_ty = operand_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(src, "switch on union with no attached enum", .{}); @@ -13691,7 +13634,7 @@ fn maybeErrorUnwrap( return true; } - const panic_fn = try sema.getBuiltin("panicUnwrapError"); + const panic_fn = try mod.getBuiltin("panicUnwrapError"); const err_return_trace = try sema.getErrorReturnTrace(block); const args: [2]Air.Inst.Ref = .{ err_return_trace, operand }; try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check"); @@ -13701,7 +13644,7 @@ fn maybeErrorUnwrap( const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const msg_inst = try sema.resolveInst(inst_data.operand); - const panic_fn = try sema.getBuiltin("panic"); + const panic_fn = try mod.getBuiltin("panic"); const err_return_trace = try sema.getErrorReturnTrace(block); const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value }; try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check"); @@ -13766,7 +13709,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, .{ .needed_comptime_reason = "field name must be comptime-known", }); - try sema.resolveTypeFields(ty); + try ty.resolveFields(mod); const ip = &mod.intern_pool; const has_field = hf: { @@ -13946,7 +13889,7 @@ fn zirShl( return mod.undefRef(sema.typeOf(lhs)); } // If rhs is 0, return lhs without doing any calculations. 
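            // For example, a comptime-known `x << 0` simply yields `x`.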
- if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) { @@ -14111,7 +14054,7 @@ fn zirShr( return mod.undefRef(lhs_ty); } // If rhs is 0, return lhs without doing any calculations. - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) { @@ -14158,7 +14101,7 @@ fn zirShr( if (air_tag == .shr_exact) { // Detect if any ones would be shifted out. const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod); - if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) { + if (!(try truncated.compareAllWithZeroSema(.eq, mod))) { return sema.fail(block, src, "exact shift shifted out 1 bits", .{}); } } @@ -14582,12 +14525,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.requireRuntimeBlock(block, src, runtime_src); if (ptr_addrspace) |ptr_as| { - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = resolved_elem_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); @@ -14670,7 +14613,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins .none => null, else => Value.fromInterned(ptr_info.sentinel), }, - .len = try val.sliceLen(sema), + .len = try val.sliceLen(mod), }; }, .One => { @@ -14912,12 +14855,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (ptr_addrspace) |ptr_as| { - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = lhs_info.elem_type.toIntern(), .flags = .{ .address_space = ptr_as }, }); @@ -15105,7 +15048,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .Int, .ComptimeInt, .ComptimeFloat => { if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15120,7 +15063,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly @@ -15241,7 +15184,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } else { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try 
mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15256,7 +15199,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly @@ -15408,7 +15351,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15423,7 +15366,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly @@ -15518,7 +15461,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15533,7 +15476,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } } @@ -15758,7 +15701,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), @@ -15777,18 +15720,18 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val); // If this answer could possibly be different by doing `intMod`, // we must emit a compile error. Otherwise, it's OK. - if (!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) and - !(try rem_result.compareAllWithZeroAdvanced(.eq, sema))) + if (!(try lhs_val.compareAllWithZeroSema(.gte, mod)) and + !(try rem_result.compareAllWithZeroSema(.eq, mod))) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } @@ -15806,14 +15749,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) { return sema.failWithDivideByZero(block, rhs_src); } - if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) { + if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) { + if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, mod))) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern()); @@ -15864,8 +15807,8 @@ fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileErr // resorting to BigInt first. 
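    // `Value.BigIntSpace` is stack scratch that lets small integer values be
    // viewed as big integers without allocating.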
    var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
     const limbs_q = try sema.arena.alloc(
         math.big.Limb,
         lhs_bigint.limbs.len,
@@ -15941,7 +15884,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
             if (maybe_lhs_val) |lhs_val| {
@@ -15957,7 +15900,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
         }
@@ -16036,7 +15979,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
             if (maybe_lhs_val) |lhs_val| {
@@ -16052,7 +15995,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
-            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+            if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                 return sema.failWithDivideByZero(block, rhs_src);
             }
         }
@@ -16139,12 +16082,12 @@ fn zirOverflowArithmetic(
                 // to the result, even if it is undefined.
                 // Otherwise, if either of the arguments is undefined, undefined is returned.
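                // For example, `@addWithOverflow(x, 0)` folds to `.{ x, 0 }` here.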
                if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
@@ -16165,7 +16108,7 @@ fn zirOverflowArithmetic(
                 if (maybe_rhs_val) |rhs_val| {
                     if (rhs_val.isUndef(mod)) {
                         break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
-                    } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    } else if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     } else if (maybe_lhs_val) |lhs_val| {
                         if (lhs_val.isUndef(mod)) {
@@ -16184,7 +16127,7 @@ fn zirOverflowArithmetic(
                 const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
                 if (maybe_lhs_val) |lhs_val| {
                     if (!lhs_val.isUndef(mod)) {
-                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                         } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
@@ -16194,7 +16137,7 @@ fn zirOverflowArithmetic(

                 if (maybe_rhs_val) |rhs_val| {
                     if (!rhs_val.isUndef(mod)) {
-                        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                         } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                             break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
@@ -16218,12 +16161,12 @@ fn zirOverflowArithmetic(
                 // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
                 // Otherwise, if either of the arguments is undefined, both results are undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
                         break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
@@ -16374,7 +16317,7 @@ fn analyzeArithmetic(
                 // overflow (max_int), causing illegal behavior.
                 // For floats: either operand being undef makes the result undef.
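                // For example, a comptime-known zero operand folds `x + 0` to `x`
                // with no runtime add emitted.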
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { return casted_rhs; } } @@ -16386,7 +16329,7 @@ fn analyzeArithmetic( return mod.undefRef(resolved_type); } } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16418,7 +16361,7 @@ fn analyzeArithmetic( // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { return casted_rhs; } } @@ -16426,7 +16369,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -16439,7 +16382,7 @@ fn analyzeArithmetic( // If either of the operands are zero, then the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) { return casted_rhs; } } @@ -16447,7 +16390,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -16488,7 +16431,7 @@ fn analyzeArithmetic( return mod.undefRef(resolved_type); } } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16523,7 +16466,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16544,7 +16487,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { return casted_lhs; } } @@ -16591,7 +16534,7 @@ fn analyzeArithmetic( if (lhs_val.isNan(mod)) { return Air.internedToRef(lhs_val.toIntern()); } - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) lz: { if (maybe_rhs_val) |rhs_val| { if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); @@ -16622,7 +16565,7 @@ fn analyzeArithmetic( if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) rz: { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isInf(mod)) { return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern()); @@ -16674,7 +16617,7 @@ fn analyzeArithmetic( }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, 
scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16687,7 +16630,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16719,7 +16662,7 @@ fn analyzeArithmetic( }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { - if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try lhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16732,7 +16675,7 @@ fn analyzeArithmetic( if (rhs_val.isUndef(mod)) { return mod.undefRef(resolved_type); } - if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + if (try rhs_val.compareAllWithZeroSema(.eq, mod)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } @@ -16828,7 +16771,7 @@ fn analyzePtrArithmetic( const new_ptr_ty = t: { // Calculate the new pointer alignment. - // This code is duplicated in `elemPtrType`. + // This code is duplicated in `Type.elemPtrType`. if (ptr_info.flags.alignment == .none) { // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness. break :t ptr_ty; @@ -16837,7 +16780,7 @@ fn analyzePtrArithmetic( // it being a multiple of the type size. const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const addend = if (opt_off_val) |off_val| a: { - const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntAdvanced(sema)); + const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(mod)); break :a elem_size * off_int; } else elem_size; @@ -16850,7 +16793,7 @@ fn analyzePtrArithmetic( )); assert(new_align != .none); - break :t try sema.ptrType(.{ + break :t try mod.ptrTypeSema(.{ .child = ptr_info.child, .sentinel = ptr_info.sentinel, .flags = .{ @@ -16869,14 +16812,14 @@ fn analyzePtrArithmetic( if (opt_off_val) |offset_val| { if (ptr_val.isUndef(mod)) return mod.undefRef(new_ptr_ty); - const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntAdvanced(sema)); + const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(mod)); if (offset_int == 0) return ptr; if (air_tag == .ptr_sub) { const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } else { - const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, sema), new_ptr_ty); + const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, mod), new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } } else break :rs offset_src; @@ -16975,7 +16918,6 @@ fn zirAsm( // Indicate the output is the asm instruction return value.
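The same theme continues through `analyzeArithmetic` and `analyzePtrArithmetic`: pointer types are now interned via `mod.ptrTypeSema(...)` rather than `sema.ptrType(...)`, and unsigned extraction moves from `toUnsignedIntAdvanced(sema)` to `toUnsignedIntSema(mod)`. A sketch of the new pointer-type construction, assuming `mod: *Module` and `elem_ty: Type`; the wrapper function and its alignment choice are illustrative, while the `ptrTypeSema` call shape comes from the hunks above:

    // Sketch only: build a pointer type through the Zcu, as the call sites
    // above now do. `.none` alignment means "use the ABI alignment";
    // fields omitted from `.flags` keep their defaults.
    fn examplePtrTy(mod: *Module, elem_ty: Type) !Type {
        return mod.ptrTypeSema(.{
            .child = elem_ty.toIntern(),
            .flags = .{ .alignment = .none },
        });
    }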
arg.* = .none; const out_ty = try sema.resolveType(block, ret_ty_src, output.data.operand); - try sema.queueFullTypeResolution(out_ty); expr_ty = Air.internedToRef(out_ty.toIntern()); } else { arg.* = try sema.resolveInst(output.data.operand); @@ -17010,7 +16952,6 @@ fn zirAsm( .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src), else => { arg.* = uncasted_arg; - try sema.queueFullTypeResolution(uncasted_arg_ty); }, } @@ -17169,7 +17110,7 @@ fn analyzeCmpUnionTag( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const union_ty = sema.typeOf(un); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); @@ -17385,9 +17326,6 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. => {}, } const val = try ty.lazyAbiSize(mod); - if (val.isLazySize(mod)) { - try sema.queueFullTypeResolution(ty); - } return Air.internedToRef(val.toIntern()); } @@ -17427,7 +17365,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .AnyFrame, => {}, } - const bit_size = try operand_ty.bitSizeAdvanced(mod, sema); + const bit_size = try operand_ty.bitSizeAdvanced(mod, .sema); return mod.intRef(Type.comptime_int, bit_size); } @@ -17613,7 +17551,7 @@ fn zirBuiltinSrc( } }); }; - const src_loc_ty = try sema.getBuiltinType("SourceLocation"); + const src_loc_ty = try mod.getBuiltinType("SourceLocation"); const fields = .{ // file: [:0]const u8, file_name_val, @@ -17637,7 +17575,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty = try sema.resolveType(block, src, inst_data.operand); - const type_info_ty = try sema.getBuiltinType("Type"); + const type_info_ty = try mod.getBuiltinType("Type"); const type_info_tag_ty = type_info_ty.unionTagType(mod).?; if (ty.typeDeclInst(mod)) |type_decl_inst| { @@ -17718,7 +17656,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = new_decl_ty.toIntern(), .storage = .{ .elems = param_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = param_info_ty.toIntern(), .flags = .{ .size = .Slice, @@ -17748,7 +17686,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai func_ty_info.return_type, } }); - const callconv_ty = try sema.getBuiltinType("CallingConvention"); + const callconv_ty = try mod.getBuiltinType("CallingConvention"); const field_values = .{ // calling_convention: CallingConvention, @@ -17782,7 +17720,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const int_info_decl = mod.declPtr(int_info_decl_index); const int_info_ty = int_info_decl.val.toType(); - const signedness_ty = try sema.getBuiltinType("Signedness"); + const signedness_ty = try mod.getBuiltinType("Signedness"); const info = ty.intInfo(mod); const field_values = .{ // signedness: Signedness, @@ -17830,12 +17768,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try Type.fromInterned(info.child).lazyAbiAlignment(mod); - const addrspace_ty = try sema.getBuiltinType("AddressSpace"); + const addrspace_ty = try mod.getBuiltinType("AddressSpace"); const 
pointer_ty = t: { const decl_index = (try sema.namespaceLookup( block, src, - (try sema.getBuiltinType("Type")).getNamespaceIndex(mod), + (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "Pointer", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -17984,8 +17922,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t set_field_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(error_field_ty); - // Build our list of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise @@ -18036,7 +17972,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; // Build our ?[]const Error value - const slice_errors_ty = try sema.ptrType(.{ + const slice_errors_ty = try mod.ptrTypeSema(.{ .child = error_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18182,7 +18118,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = fields_array_ty.toIntern(), .storage = .{ .elems = enum_field_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = enum_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18262,7 +18198,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t union_field_ty_decl.val.toType(); }; - try sema.resolveTypeLayout(ty); // Getting alignment requires type layout + try ty.resolveLayout(mod); // Getting alignment requires type layout const union_obj = mod.typeToUnion(ty).?; const tag_type = union_obj.loadTagType(ip); const layout = union_obj.getLayout(ip); @@ -18298,7 +18234,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; const alignment = switch (layout) { - .auto, .@"extern" => try sema.unionFieldAlignment(union_obj, @intCast(field_index)), + .auto, .@"extern" => try mod.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema), .@"packed" => .none, }; @@ -18326,7 +18262,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = array_fields_ty.toIntern(), .storage = .{ .elems = union_field_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = union_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18359,7 +18295,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decl_index = (try sema.namespaceLookup( block, src, - (try sema.getBuiltinType("Type")).getNamespaceIndex(mod), + (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -18412,7 +18348,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t struct_field_ty_decl.val.toType(); }; - try sema.resolveTypeLayout(ty); // Getting alignment requires type layout + try ty.resolveLayout(mod); // Getting alignment requires type layout var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); @@ -18452,7 +18388,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); }; - try sema.resolveTypeLayout(Type.fromInterned(field_ty)); + try Type.fromInterned(field_ty).resolveLayout(mod); const is_comptime = field_val != .none; const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null; @@ -18481,7 +18417,7 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len); - try sema.resolveStructFieldInits(ty); + try ty.resolveStructFieldInits(mod); for (struct_field_vals, 0..) |*field_val, field_index| { const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name| @@ -18520,10 +18456,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const default_val_ptr = try sema.optRefValue(opt_default_val); const alignment = switch (struct_type.layout) { .@"packed" => .none, - else => try sema.structFieldAlignment( + else => try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, + .sema, ), }; @@ -18555,7 +18492,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = array_fields_ty.toIntern(), .storage = .{ .elems = struct_field_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = struct_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18591,7 +18528,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decl_index = (try sema.namespaceLookup( block, src, - (try sema.getBuiltinType("Type")).getNamespaceIndex(mod), + (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -18635,7 +18572,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t type_opaque_ty_decl.val.toType(); }; - try sema.resolveTypeFields(ty); + try ty.resolveFields(mod); const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod)); const field_values = .{ @@ -18677,7 +18614,6 @@ fn typeInfoDecls( const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); break :t declaration_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(declaration_ty); var decl_vals = std.ArrayList(InternPool.Index).init(gpa); defer decl_vals.deinit(); @@ -18695,7 +18631,7 @@ fn typeInfoDecls( .ty = array_decl_ty.toIntern(), .storage = .{ .elems = decl_vals.items }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = declaration_ty.toIntern(), .flags = .{ .size = .Slice, @@ -19295,7 +19231,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand_ty = sema.typeOf(operand); const ptr_info = operand_ty.ptrInfo(mod); - const res_ty = try sema.ptrType(.{ + const res_ty = try mod.ptrTypeSema(.{ .child = err_union_ty.errorUnionPayload(mod).toIntern(), .flags = .{ .is_const = ptr_info.flags.is_const, @@ -19528,11 +19464,11 @@ fn retWithErrTracing( else => true, }; const gpa = sema.gpa; - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const return_err_fn = try sema.getBuiltin("returnError"); + const return_err_fn = try mod.getBuiltin("returnError"); const args: [1]Air.Inst.Ref = .{err_return_trace}; if (!need_check) { @@ -19735,7 +19671,7 @@ fn analyzeRet( return sema.failWithOwnedErrorMsg(block, msg); } - try sema.resolveTypeLayout(sema.fn_ret_ty); + 
try sema.fn_ret_ty.resolveLayout(mod); try sema.validateRuntimeValue(block, operand_src, operand); @@ -19817,7 +19753,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, else => {}, } - const align_bytes = (try val.getUnsignedIntAdvanced(mod, sema)).?; + const align_bytes = (try val.getUnsignedIntAdvanced(mod, .sema)).?; break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes); } else .none; @@ -19851,7 +19787,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air elem_ty.fmt(mod), bit_offset, bit_offset - host_size * 8, host_size, }); } - const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, sema); + const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, .sema); if (elem_bit_size > host_size * 8 - bit_offset) { return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{ elem_ty.fmt(mod), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size, @@ -19892,7 +19828,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }); } - const ty = try sema.ptrType(.{ + const ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = sentinel, .flags = .{ @@ -19983,7 +19919,7 @@ fn structInitEmpty( const mod = sema.mod; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); // The init values to use for the struct instance. const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); @@ -20054,7 +19990,6 @@ fn unionInit( try sema.requireRuntimeBlock(block, init_src, null); _ = union_ty_src; - try sema.queueFullTypeResolution(union_ty); return block.addUnionInit(union_ty, field_index, init); } @@ -20083,7 +20018,7 @@ fn zirStructInit( else => |e| return e, }; const resolved_ty = result_ty.optEuBaseType(mod); - try sema.resolveTypeLayout(resolved_ty); + try resolved_ty.resolveLayout(mod); if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`.
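Another systematic change in these hunks: the type-resolution entry points move from `Sema` methods onto `Type` itself, taking the `Zcu` as a parameter. `sema.resolveTypeFields(ty)` becomes `ty.resolveFields(mod)`, `sema.resolveTypeLayout(ty)` becomes `ty.resolveLayout(mod)`, and `resolveStructFieldInits` follows the same pattern. A sketch of the resulting style; the wrapper is hypothetical and only the method names come from the hunks above:

    // Hypothetical wrapper showing the method-on-Type style used above.
    fn resolveForInit(resolved_ty: Type, mod: *Module) !void {
        try resolved_ty.resolveFields(mod); // was: sema.resolveTypeFields(resolved_ty)
        try resolved_ty.resolveLayout(mod); // was: sema.resolveTypeLayout(resolved_ty)
    }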
@@ -20124,7 +20059,7 @@ fn zirStructInit( const field_ty = resolved_ty.structFieldType(field_index, mod); field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src); if (!is_packed) { - try sema.resolveStructFieldInits(resolved_ty); + try resolved_ty.resolveStructFieldInits(mod); if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveValue(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, .{ @@ -20197,7 +20132,7 @@ fn zirStructInit( if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20211,7 +20146,6 @@ fn zirStructInit( } try sema.requireRuntimeBlock(block, src, null); - try sema.queueFullTypeResolution(resolved_ty); const union_val = try block.addUnionInit(resolved_ty, field_index, init_inst); return sema.coerce(block, result_ty, union_val, src); } @@ -20288,7 +20222,7 @@ fn finishStructInit( continue; } - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const field_init = struct_type.fieldInit(ip, i); if (field_init == .none) { @@ -20358,9 +20292,9 @@ fn finishStructInit( } if (is_ref) { - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveLayout(mod); const target = sema.mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20380,8 +20314,7 @@ fn finishStructInit( .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(runtime_index), } })); - try sema.resolveStructFieldInits(struct_ty); - try sema.queueFullTypeResolution(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const struct_val = try block.addAggregateInit(struct_ty, field_inits); return sema.coerce(block, result_ty, struct_val, init_src); } @@ -20490,7 +20423,7 @@ fn structInitAnon( if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20504,7 +20437,7 @@ fn structInitAnon( }; extra_index = item.end; - const field_ptr_ty = try sema.ptrType(.{ + const field_ptr_ty = try mod.ptrTypeSema(.{ .child = field_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20597,7 +20530,7 @@ fn zirArrayInit( dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src); if (is_tuple) { if (array_ty.structFieldIsComptime(i, mod)) - try sema.resolveStructFieldInits(array_ty); + try array_ty.resolveStructFieldInits(mod); if (try array_ty.structFieldValueComptime(mod, i)) |field_val| { const init_val = try sema.resolveValue(dest.*) orelse { return sema.failWithNeededComptime(block, elem_src, .{ @@ -20641,11 +20574,10 @@ fn zirArrayInit( .init_node_offset = src.offset.node_offset.x, .elem_index = runtime_index, } })); - try sema.queueFullTypeResolution(array_ty); if (is_ref) { const target = mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20654,7 +20586,7 @@ fn zirArrayInit( if (is_tuple) { for 
(resolved_args, 0..) |arg, i| { - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = array_ty.structFieldType(i, mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20667,7 +20599,7 @@ fn zirArrayInit( return sema.makePtrConst(block, alloc); } - const elem_ptr_ty = try sema.ptrType(.{ + const elem_ptr_ty = try mod.ptrTypeSema(.{ .child = array_ty.elemType2(mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20755,14 +20687,14 @@ fn arrayInitAnon( if (is_ref) { const target = sema.mod.getTarget(); - const alloc_ty = try sema.ptrType(.{ + const alloc_ty = try mod.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); for (operands, 0..) |operand, i_usize| { const i: u32 = @intCast(i_usize); - const field_ptr_ty = try sema.ptrType(.{ + const field_ptr_ty = try mod.ptrTypeSema(.{ .child = types[i], .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); @@ -20832,7 +20764,7 @@ fn fieldType( const ip = &mod.intern_pool; var cur_ty = aggregate_ty; while (true) { - try sema.resolveTypeFields(cur_ty); + try cur_ty.resolveFields(mod); switch (cur_ty.zigTypeTag(mod)) { .Struct => switch (ip.indexToKey(cur_ty.toIntern())) { .anon_struct_type => |anon_struct| { @@ -20883,8 +20815,8 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { const mod = sema.mod; const ip = &mod.intern_pool; - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern()); @@ -20918,9 +20850,6 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } const val = try ty.lazyAbiAlignment(mod); - if (val.isLazyAlign(mod)) { - try sema.queueFullTypeResolution(ty); - } return Air.internedToRef(val.toIntern()); } @@ -21095,7 +21024,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeLayout(operand_ty); + try operand_ty.resolveLayout(mod); const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined); @@ -21171,7 +21100,7 @@ fn zirReify( }, }, }; - const type_info_ty = try sema.getBuiltinType("Type"); + const type_info_ty = try mod.getBuiltinType("Type"); const uncasted_operand = try sema.resolveInst(extra.operand); const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstDefinedValue(block, operand_src, type_info, .{ @@ -21205,7 +21134,7 @@ fn zirReify( ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); - const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema)); + const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod)); const ty = try mod.intType(signedness, bits); return Air.internedToRef(ty.toIntern()); }, @@ 
-21220,7 +21149,7 @@ fn zirReify( try ip.getOrPutString(gpa, "child", .no_embedded_nulls), ).?); - const len: u32 = @intCast(try len_val.toUnsignedIntAdvanced(sema)); + const len: u32 = @intCast(try len_val.toUnsignedIntSema(mod)); const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); @@ -21238,7 +21167,7 @@ fn zirReify( try ip.getOrPutString(gpa, "bits", .no_embedded_nulls), ).?); - const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema)); + const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -21288,7 +21217,7 @@ fn zirReify( return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?; + const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, .sema)).?; if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) { return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int}); } @@ -21296,7 +21225,7 @@ fn zirReify( const elem_ty = child_val.toType(); if (abi_align != .none) { - try sema.resolveTypeLayout(elem_ty); + try elem_ty.resolveLayout(mod); } const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); @@ -21340,7 +21269,7 @@ fn zirReify( } } - const ty = try sema.ptrType(.{ + const ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = actual_sentinel, .flags = .{ @@ -21369,7 +21298,7 @@ fn zirReify( try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls), ).?); - const len = try len_val.toUnsignedIntAdvanced(sema); + const len = try len_val.toUnsignedIntSema(mod); const child_ty = child_val.toType(); const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { const ptr_ty = try mod.singleMutPtrType(child_ty); @@ -21476,7 +21405,7 @@ fn zirReify( const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); // Decls - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified structs must have no decls", .{}); } @@ -21509,7 +21438,7 @@ fn zirReify( try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls), ).?); - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified enums must have no decls", .{}); } @@ -21527,7 +21456,7 @@ fn zirReify( ).?); // Decls - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified opaque must have no decls", .{}); } @@ -21575,7 +21504,7 @@ fn zirReify( try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), ).?); - if (try decls_val.sliceLen(sema) > 0) { + if (try decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified unions must have no decls", .{}); } const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -21934,7 +21863,7 @@ fn reifyUnion( field_ty.* = field_type_val.toIntern(); if (any_aligns) { - const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntAdvanced(sema); + const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); @@ -21979,7 +21908,7 @@ fn reifyUnion( field_ty.* = field_type_val.toIntern(); if (any_aligns) { - const byte_align = try (try field_info.fieldValue(mod, 
2)).toUnsignedIntAdvanced(sema); + const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); @@ -22036,6 +21965,7 @@ fn reifyUnion( loaded_union.flagsPtr(ip).status = .have_field_types; try mod.finalizeAnonDecl(new_decl_index); + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } @@ -22109,7 +22039,7 @@ fn reifyStruct( if (field_is_comptime) any_comptime_fields = true; if (field_default_value != .none) any_default_inits = true; - switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, sema)) { + switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, .sema)) { .eq => {}, .gt => any_aligned_fields = true, .lt => unreachable, @@ -22192,7 +22122,7 @@ fn reifyStruct( return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const byte_align = try field_alignment_val.toUnsignedIntAdvanced(sema); + const byte_align = try field_alignment_val.toUnsignedIntSema(mod); if (byte_align == 0) { if (layout != .@"packed") { struct_type.field_aligns.get(ip)[field_idx] = .none; @@ -22278,7 +22208,7 @@ fn reifyStruct( var fields_bit_sum: u64 = 0; for (0..struct_type.field_types.len) |field_idx| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]); - sema.resolveTypeLayout(field_ty) catch |err| switch (err) { + field_ty.resolveLayout(mod) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; try sema.errNote(src, msg, "while checking a field of this struct", .{}); @@ -22300,11 +22230,12 @@ fn reifyStruct( } try mod.finalizeAnonDecl(new_decl_index); + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref { - const va_list_ty = try sema.getBuiltinType("VaList"); + const va_list_ty = try sema.mod.getBuiltinType("VaList"); const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty); const inst = try sema.resolveInst(zir_ref); @@ -22343,7 +22274,7 @@ fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) const va_list_src = block.builtinCallArgSrc(extra.node, 0); const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand); - const va_list_ty = try sema.getBuiltinType("VaList"); + const va_list_ty = try sema.mod.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref); @@ -22363,7 +22294,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const src = block.nodeOffset(@bitCast(extended.operand)); - const va_list_ty = try sema.getBuiltinType("VaList"); + const va_list_ty = try sema.mod.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addInst(.{ .tag = .c_va_start, @@ -22497,7 +22428,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro _ = try sema.checkIntType(block, operand_src, operand_scalar_ty); if (try sema.resolveValue(operand)) |operand_val| { - const result_val = try 
operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, sema); + const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, .sema); return Air.internedToRef(result_val.toIntern()); } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) { return sema.failWithNeededComptime(block, operand_src, .{ @@ -22545,7 +22476,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.checkPtrType(block, src, ptr_ty, true); const elem_ty = ptr_ty.elemType2(mod); - const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); + const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, .sema); if (ptr_ty.isSlice(mod)) { const msg = msg: { @@ -22644,7 +22575,7 @@ fn ptrFromIntVal( } return sema.failWithUseOfUndef(block, operand_src); } - const addr = try operand_val.toUnsignedIntAdvanced(sema); + const addr = try operand_val.toUnsignedIntSema(zcu); if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(zcu)}); if (addr != 0 and ptr_align != .none and !ptr_align.check(addr)) @@ -22842,8 +22773,8 @@ fn ptrCastFull( const src_info = operand_ty.ptrInfo(mod); const dest_info = dest_ty.ptrInfo(mod); - try sema.resolveTypeLayout(Type.fromInterned(src_info.child)); - try sema.resolveTypeLayout(Type.fromInterned(dest_info.child)); + try Type.fromInterned(src_info.child).resolveLayout(mod); + try Type.fromInterned(dest_info.child).resolveLayout(mod); const src_slice_like = src_info.flags.size == .Slice or (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array); @@ -23091,7 +23022,7 @@ fn ptrCastFull( // Only convert to a many-pointer at first var info = dest_info; info.flags.size = .Many; - const ty = try sema.ptrType(info); + const ty = try mod.ptrTypeSema(info); if (dest_ty.zigTypeTag(mod) == .Optional) { break :blk try mod.optionalType(ty.toIntern()); } else { @@ -23109,7 +23040,7 @@ fn ptrCastFull( return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); } if (dest_align.compare(.gt, src_align)) { - if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| { + if (try ptr_val.getUnsignedIntAdvanced(mod, .sema)) |addr| { if (!dest_align.check(addr)) { return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, @@ -23176,7 +23107,7 @@ fn ptrCastFull( // We can't change address spaces with a bitcast, so this requires two instructions var intermediate_info = src_info; intermediate_info.flags.address_space = dest_info.flags.address_space; - const intermediate_ptr_ty = try sema.ptrType(intermediate_info); + const intermediate_ptr_ty = try mod.ptrTypeSema(intermediate_info); const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: { break :blk try mod.optionalType(intermediate_ptr_ty.toIntern()); } else intermediate_ptr_ty; @@ -23233,7 +23164,7 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst if (flags.volatile_cast) ptr_info.flags.is_volatile = false; const dest_ty = blk: { - const dest_ty = try sema.ptrType(ptr_info); + const dest_ty = try mod.ptrTypeSema(ptr_info); if (operand_ty.zigTypeTag(mod) == .Optional) { break :blk try mod.optionalType(dest_ty.toIntern()); } @@ -23523,7 +23454,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeLayout(ty); + try 
ty.resolveLayout(mod); switch (ty.zigTypeTag(mod)) { .Struct => {}, else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}), @@ -23766,7 +23697,7 @@ fn checkAtomicPtrOperand( const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { .Pointer => ptr_ty.ptrInfo(mod), else => { - const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data); + const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data); _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); unreachable; }, @@ -23776,7 +23707,7 @@ fn checkAtomicPtrOperand( wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero; wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile; - const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data); + const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data); const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); return casted_ptr; @@ -23953,7 +23884,7 @@ fn resolveExportOptions( const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; - const export_options_ty = try sema.getBuiltinType("ExportOptions"); + const export_options_ty = try mod.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); @@ -24017,7 +23948,7 @@ fn resolveBuiltinEnum( reason: NeededComptimeReason, ) CompileError!@field(std.builtin, name) { const mod = sema.mod; - const ty = try sema.getBuiltinType(name); + const ty = try mod.getBuiltinType(name); const air_ref = try sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, ty, air_ref, src); const val = try sema.resolveConstDefinedValue(block, src, coerced, reason); @@ -24777,7 +24708,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data; const func = try sema.resolveInst(extra.callee); - const modifier_ty = try sema.getBuiltinType("CallModifier"); + const modifier_ty = try mod.getBuiltinType("CallModifier"); const air_ref = try sema.resolveInst(extra.modifier); const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src); const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{ @@ -24881,7 +24812,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins .Struct, .Union => {}, else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(zcu)}), } - try sema.resolveTypeLayout(parent_ty); + try parent_ty.resolveLayout(zcu); const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{ .needed_comptime_reason = "field name must be comptime-known", @@ -24912,7 +24843,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins var actual_parent_ptr_info: InternPool.Key.PtrType = .{ .child = parent_ty.toIntern(), .flags = .{ - .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema), + .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, @@ -24924,7 +24855,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins var actual_field_ptr_info: InternPool.Key.PtrType = .{ .child = field_ty.toIntern(), .flags = .{ - .alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, sema), + .alignment = try 
field_ptr_ty.ptrAlignmentAdvanced(zcu, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, @@ -24935,12 +24866,13 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins switch (parent_ty.containerLayout(zcu)) { .auto => { actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict( - if (zcu.typeToStruct(parent_ty)) |struct_obj| try sema.structFieldAlignment( + if (zcu.typeToStruct(parent_ty)) |struct_obj| try zcu.structFieldAlignmentAdvanced( struct_obj.fieldAlign(ip, field_index), field_ty, struct_obj.layout, + .sema, ) else if (zcu.typeToUnion(parent_ty)) |union_obj| - try sema.unionFieldAlignment(union_obj, field_index) + try zcu.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema) else actual_field_ptr_info.flags.alignment, ); @@ -24970,9 +24902,9 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins }, } - const actual_field_ptr_ty = try sema.ptrType(actual_field_ptr_info); + const actual_field_ptr_ty = try zcu.ptrTypeSema(actual_field_ptr_info); const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src); - const actual_parent_ptr_ty = try sema.ptrType(actual_parent_ptr_info); + const actual_parent_ptr_ty = try zcu.ptrTypeSema(actual_parent_ptr_info); const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: { switch (parent_ty.zigTypeTag(zcu)) { @@ -25032,7 +24964,6 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src); } else result: { try sema.requireRuntimeBlock(block, inst_src, field_ptr_src); - try sema.queueFullTypeResolution(parent_ty); break :result try block.addInst(.{ .tag = .field_parent_ptr, .data = .{ .ty_pl = .{ @@ -25345,7 +25276,7 @@ fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !A // Already an array pointer. return ptr; } - const new_ty = try sema.ptrType(.{ + const new_ty = try mod.ptrTypeSema(.{ .child = (try mod.arrayType(.{ .len = len, .sentinel = info.sentinel, @@ -25444,7 +25375,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { - const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; + const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); for (0..len) |i| { const elem_index = try mod.intRef(Type.usize, i); @@ -25503,7 +25434,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void var new_dest_ptr = dest_ptr; var new_src_ptr = src_ptr; if (len_val) |val| { - const len = try val.toUnsignedIntAdvanced(sema); + const len = try val.toUnsignedIntSema(mod); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known.
return; @@ -25550,7 +25481,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void assert(dest_manyptr_ty_key.flags.size == .One); dest_manyptr_ty_key.child = dest_elem_ty.toIntern(); dest_manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src); } else new_dest_ptr; const new_src_ptr_ty = sema.typeOf(new_src_ptr); @@ -25561,7 +25492,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void assert(src_manyptr_ty_key.flags.size == .One); src_manyptr_ty_key.child = src_elem_ty.toIntern(); src_manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(src_manyptr_ty_key), new_src_ptr, src_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src); } else new_src_ptr; // ok1: dest >= src + len @@ -25628,7 +25559,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src; const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; - const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?; + const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known.
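Two more shapes recur in the hunks above. First, `Advanced` helpers that previously accepted a `*Sema` (or null) now take a resolution-strategy tag, as in `getUnsignedIntAdvanced(mod, .sema)` and `bitSizeAdvanced(mod, .sema)`. Second, the old `sema.queueFullTypeResolution(...)` calls are dropped outright, with `reifyStruct`/`reifyUnion` instead enqueueing an explicit `.resolve_type_fully` work item. A sketch of the strategy-tag style; the `comptimeKnownLen` helper is hypothetical, while the call itself matches the memcpy/memset hunks:

    // Hypothetical helper; a null result means the length is runtime-known.
    fn comptimeKnownLen(len_val: Value, mod: *Module) !?u64 {
        // `.sema` selects Sema-driven resolution of lazy values, replacing
        // the old nullable `*Sema` argument.
        return try len_val.getUnsignedIntAdvanced(mod, .sema);
    }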
@@ -25808,7 +25739,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntAdvanced(sema)); + const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(mod)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else if (extra.data.bits.has_align_ref) blk: { @@ -25828,7 +25759,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A error.GenericPoison => break :blk null, else => |e| return e, }; - const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntAdvanced(sema)); + const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(mod)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else .none; @@ -25904,7 +25835,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; - const cc_ty = try sema.getBuiltinType("CallingConvention"); + const cc_ty = try mod.getBuiltinType("CallingConvention"); const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{ .needed_comptime_reason = "calling convention must be comptime-known", }); @@ -26117,7 +26048,7 @@ fn resolvePrefetchOptions( const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; - const options_ty = try sema.getBuiltinType("PrefetchOptions"); + const options_ty = try mod.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); const rw_src = block.src(.{ .init_field_rw = src.offset.node_offset_builtin_call_arg.builtin_call_node }); @@ -26141,7 +26072,7 @@ fn resolvePrefetchOptions( return std.builtin.PrefetchOptions{ .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), - .locality = @intCast(try locality_val.toUnsignedIntAdvanced(sema)), + .locality = @intCast(try locality_val.toUnsignedIntSema(mod)), .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -26189,7 +26120,7 @@ fn resolveExternOptions( const gpa = sema.gpa; const ip = &mod.intern_pool; const options_inst = try sema.resolveInst(zir_ref); - const extern_options_ty = try sema.getBuiltinType("ExternOptions"); + const extern_options_ty = try mod.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); const name_src = block.src(.{ .init_field_name = src.offset.node_offset_builtin_call_arg.builtin_call_node }); @@ -26440,7 +26371,7 @@ fn explainWhyTypeIsComptime( var type_set = TypeSet{}; defer type_set.deinit(sema.gpa); - try sema.resolveTypeFully(ty); + try ty.resolveFully(sema.mod); return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set); } @@ -26567,7 +26498,7 @@ const ExternPosition = enum { /// Returns true if `ty` is allowed in extern types. /// Does *NOT* require `ty` to be resolved in any way. -/// Calls `resolveTypeLayout` for packed containers. +/// Calls `resolveLayout` for packed containers.
fn validateExternType( sema: *Sema, ty: Type, @@ -26618,7 +26549,7 @@ fn validateExternType( .Struct, .Union => switch (ty.containerLayout(mod)) { .@"extern" => return true, .@"packed" => { - const bit_size = try ty.bitSizeAdvanced(mod, sema); + const bit_size = try ty.bitSizeAdvanced(mod, .sema); switch (bit_size) { 0, 8, 16, 32, 64, 128 => return true, else => return false, @@ -26796,11 +26727,11 @@ fn explainWhyTypeIsNotPacked( } } -fn prepareSimplePanic(sema: *Sema, block: *Block) !void { +fn prepareSimplePanic(sema: *Sema) !void { const mod = sema.mod; if (mod.panic_func_index == .none) { - const decl_index = (try sema.getBuiltinDecl(block, "panic")); + const decl_index = (try mod.getBuiltinDecl("panic")); // decl_index may be an alias; we must find the decl that actually // owns the function. try sema.ensureDeclAnalyzed(decl_index); @@ -26813,10 +26744,10 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void { } if (mod.null_stack_trace == .none) { - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const target = mod.getTarget(); - const ptr_stack_trace_ty = try sema.ptrType(.{ + const ptr_stack_trace_ty = try mod.ptrTypeSema(.{ .child = stack_trace_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .global_constant), @@ -26838,9 +26769,9 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP const gpa = sema.gpa; if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x; - try sema.prepareSimplePanic(block); + try sema.prepareSimplePanic(); - const panic_messages_ty = try sema.getBuiltinType("panic_messages"); + const panic_messages_ty = try mod.getBuiltinType("panic_messages"); const msg_decl_index = (sema.namespaceLookup( block, LazySrcLoc.unneeded, @@ -26946,7 +26877,7 @@ fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.
return; } - try sema.prepareSimplePanic(block); + try sema.prepareSimplePanic(); const panic_func = mod.funcInfo(mod.panic_func_index); const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl); @@ -26992,7 +26923,7 @@ fn panicUnwrapError( if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.getBuiltin("panicUnwrapError"); + const panic_fn = try sema.mod.getBuiltin("panicUnwrapError"); const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand); const err_return_trace = try sema.getErrorReturnTrace(&fail_block); const args: [2]Air.Inst.Ref = .{ err_return_trace, err }; @@ -27051,7 +26982,7 @@ fn panicSentinelMismatch( const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { - const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null); + const elem_ptr_ty = try ptr_ty.elemPtrType(null, mod); const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty); break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; @@ -27069,7 +27000,7 @@ fn panicSentinelMismatch( } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { - const panic_fn = try sema.getBuiltin("checkNonScalarSentinel"); + const panic_fn = try mod.getBuiltin("checkNonScalarSentinel"); const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel }; try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check"); return; @@ -27108,7 +27039,7 @@ fn safetyCheckFormatted( if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.getBuiltin(func); + const panic_fn = try sema.mod.getBuiltin(func); try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check"); } try sema.addSafetyCheckExtra(parent_block, ok, &fail_block); @@ -27170,7 +27101,7 @@ fn fieldVal( return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27267,7 +27198,7 @@ fn fieldVal( if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try sema.resolveTypeFields(child_type); + try child_type.resolveFields(mod); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index: u32 = @intCast(field_index_usize); @@ -27361,7 +27292,7 @@ fn fieldPtr( return anonDeclRef(sema, int_val.toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const new_ptr_ty = try sema.ptrType(.{ + const new_ptr_ty = try mod.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (object_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27376,7 +27307,7 @@ fn fieldPtr( .packed_offset = ptr_info.packed_offset, }); const ptr_ptr_info = object_ptr_ty.ptrInfo(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = new_ptr_ty.toIntern(), 
.sentinel = if (object_ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27410,7 +27341,7 @@ fn fieldPtr( if (field_name.eqlSlice("ptr", ip)) { const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = slice_ptr_ty.toIntern(), .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27420,7 +27351,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, sema)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, mod)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27428,7 +27359,7 @@ fn fieldPtr( try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr); return field_ptr; } else if (field_name.eqlSlice("len", ip)) { - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = .usize_type, .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27438,7 +27369,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_len_index, sema)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_len_index, mod)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27506,7 +27437,7 @@ fn fieldPtr( if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try sema.resolveTypeFields(child_type); + try child_type.resolveFields(mod); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const field_index_u32: u32 = @intCast(field_index); @@ -27601,7 +27532,7 @@ fn fieldCallBind( find_field: { switch (concrete_ty.zigTypeTag(mod)) { .Struct => { - try sema.resolveTypeFields(concrete_ty); + try concrete_ty.resolveFields(mod); if (mod.typeToStruct(concrete_ty)) |struct_type| { const field_index = struct_type.nameIndex(ip, field_name) orelse break :find_field; @@ -27627,7 +27558,7 @@ fn fieldCallBind( } }, .Union => { - try sema.resolveTypeFields(concrete_ty); + try concrete_ty.resolveFields(mod); const union_obj = mod.typeToUnion(concrete_ty).?; _ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field; const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false); @@ -27737,7 +27668,7 @@ fn finishFieldCallBind( object_ptr: Air.Inst.Ref, ) CompileError!ResolvedFieldCallee { const mod = sema.mod; - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !ptr_ty.ptrIsMutable(mod), @@ -27748,14 +27679,14 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { if (container_ty.structFieldIsComptime(field_index, mod)) { - try sema.resolveStructFieldInits(container_ty); + try container_ty.resolveStructFieldInits(mod); const default_val = (try container_ty.structFieldValueComptime(mod, field_index)).?; return .{ .direct = Air.internedToRef(default_val.toIntern()) }; } } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const ptr_val = try struct_ptr_val.ptrField(field_index, sema); + const ptr_val = try struct_ptr_val.ptrField(field_index, mod); const pointer = Air.internedToRef(ptr_val.toIntern()); return .{ .direct = try 
sema.analyzeLoad(block, src, pointer, src) }; } @@ -27831,8 +27762,8 @@ fn structFieldPtr( const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try sema.resolveTypeFields(struct_ty); - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveFields(mod); + try struct_ty.resolveLayout(mod); if (struct_ty.isTuple(mod)) { if (field_name.eqlSlice("len", ip)) { @@ -27871,7 +27802,7 @@ fn structFieldPtrByIndex( } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - const val = try struct_ptr_val.ptrField(field_index, sema); + const val = try struct_ptr_val.ptrField(field_index, mod); return Air.internedToRef(val.toIntern()); } @@ -27915,10 +27846,11 @@ fn structFieldPtrByIndex( @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset))); } else { // Our alignment is capped at the field alignment. - const field_align = try sema.structFieldAlignment( + const field_align = try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), Type.fromInterned(field_ty), struct_type.layout, + .sema, ); ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none) field_align @@ -27926,10 +27858,10 @@ fn structFieldPtrByIndex( field_align.min(parent_align); } - const ptr_field_ty = try sema.ptrType(ptr_ty_data); + const ptr_field_ty = try mod.ptrTypeSema(ptr_ty_data); if (struct_type.fieldIsComptime(ip, field_index)) { - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const val = try mod.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .base_addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] }, @@ -27955,7 +27887,7 @@ fn structFieldVal( const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); switch (ip.indexToKey(struct_ty.toIntern())) { .struct_type => { @@ -27966,7 +27898,7 @@ fn structFieldVal( const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); if (struct_type.fieldIsComptime(ip, field_index)) { - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]); } @@ -27983,7 +27915,7 @@ fn structFieldVal( } try sema.requireRuntimeBlock(block, src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(struct_byval, field_index, field_ty); }, .anon_struct_type => |anon_struct| { @@ -28050,7 +27982,7 @@ fn tupleFieldValByIndex( const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); } @@ -28071,7 +28003,7 @@ fn tupleFieldValByIndex( } try sema.requireRuntimeBlock(block, src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(tuple_byval, field_index, field_ty); } @@ -28092,11 +28024,11 @@ fn unionFieldPtr( const union_ptr_ty = sema.typeOf(union_ptr); const union_ptr_info = union_ptr_ty.ptrInfo(mod); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_obj = mod.typeToUnion(union_ty).?; const field_index = try 
sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = union_ptr_info.flags.is_const, @@ -28107,7 +28039,7 @@ fn unionFieldPtr( union_ptr_info.flags.alignment else try sema.typeAbiAlignment(union_ty); - const field_align = try sema.unionFieldAlignment(union_obj, field_index); + const field_align = try mod.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema); break :blk union_align.min(field_align); } else union_ptr_info.flags.alignment, }, @@ -28163,7 +28095,7 @@ fn unionFieldPtr( }, .@"packed", .@"extern" => {}, } - const field_ptr_val = try union_ptr_val.ptrField(field_index, sema); + const field_ptr_val = try union_ptr_val.ptrField(field_index, mod); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28198,7 +28130,7 @@ fn unionFieldVal( const ip = &zcu.intern_pool; assert(union_ty.zigTypeTag(zcu) == .Union); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(zcu); const union_obj = zcu.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); @@ -28237,7 +28169,7 @@ fn unionFieldVal( .@"packed" => if (tag_matches) { // Fast path - no need to use bitcast logic. return Air.internedToRef(un.val); - } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, sema), 0)) |field_val| { + } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, .sema), 0)) |field_val| { return Air.internedToRef(field_val.toIntern()); }, } @@ -28256,7 +28188,7 @@ fn unionFieldVal( _ = try block.addNoOp(.unreach); return .unreachable_value; } - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(zcu); return block.addStructFieldVal(union_byval, field_index, field_ty); } @@ -28287,7 +28219,7 @@ fn elemPtr( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); break :blk try sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -28325,11 +28257,11 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); - const elem_ptr = try ptr_val.ptrElem(index, sema); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); + const elem_ptr = try ptr_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr.toIntern()); }; - const result_ty = try sema.elemPtrType(indexable_ty, null); + const result_ty = try indexable_ty.elemPtrType(null, mod); try sema.requireRuntimeBlock(block, src, runtime_src); return block.addPtrElemPtr(indexable, elem_index, result_ty); @@ -28343,7 +28275,7 @@ fn elemPtrOneLayerOnly( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); 
+ const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); break :blk try sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false); }, else => unreachable, // Guaranteed by checkIndexable @@ -28383,12 +28315,12 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); const elem_ty = indexable_ty.elemType2(mod); const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); - const elem_ptr_val = try many_ptr_val.ptrElem(index, sema); + const elem_ptr_val = try many_ptr_val.ptrElem(index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern()); } @@ -28404,7 +28336,7 @@ fn elemVal( if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent; const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent; const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent; - const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(mod)); if (index != inner_ty.arrayLen(mod)) break :arr_sent; return Air.internedToRef(sentinel.toIntern()); } @@ -28422,7 +28354,7 @@ fn elemVal( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -28467,7 +28399,7 @@ fn tupleFieldPtr( const mod = sema.mod; const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(mod); - try sema.resolveTypeFields(tuple_ty); + try tuple_ty.resolveFields(mod); const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { @@ -28481,7 +28413,7 @@ fn tupleFieldPtr( } const field_ty = tuple_ty.structFieldType(field_index, mod); - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !tuple_ptr_ty.ptrIsMutable(mod), @@ -28491,7 +28423,7 @@ fn tupleFieldPtr( }); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return Air.internedToRef((try mod.intern(.{ .ptr = .{ @@ -28502,7 +28434,7 @@ fn tupleFieldPtr( } if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| { - const field_ptr_val = try tuple_ptr_val.ptrField(field_index, sema); + const field_ptr_val = try tuple_ptr_val.ptrField(field_index, mod); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28524,7 +28456,7 @@ fn tupleField( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const tuple_ty = sema.typeOf(tuple); - try sema.resolveTypeFields(tuple_ty); + try tuple_ty.resolveFields(mod); const field_count = tuple_ty.structFieldCount(mod); if 
(field_count == 0) { @@ -28540,7 +28472,7 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); // comptime field } @@ -28553,7 +28485,7 @@ fn tupleField( try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); try sema.requireRuntimeBlock(block, tuple_src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(tuple, field_index, field_ty); } @@ -28583,7 +28515,7 @@ fn elemValArray( const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); if (array_sent) |s| { if (index == array_len) { return Air.internedToRef(s.toIntern()); @@ -28599,7 +28531,7 @@ fn elemValArray( return mod.undefRef(elem_ty); } if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); const elem_val = try array_val.elemValue(mod, index); return Air.internedToRef(elem_val.toIntern()); } @@ -28621,7 +28553,6 @@ fn elemValArray( return Air.internedToRef(elem_val.toIntern()); try sema.requireRuntimeBlock(block, src, runtime_src); - try sema.queueFullTypeResolution(array_ty); return block.addBinOp(.array_elem_val, array, elem_index); } @@ -28650,7 +28581,7 @@ fn elemPtrArray( const maybe_undef_array_ptr_val = try sema.resolveValue(array_ptr); // The index must not be undefined since it can be out of bounds. 
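The hunks above show the call-site pattern that repeats through the rest of this diff: type-resolution entry points move off `Sema` and become `Type` methods taking the `Zcu` (spelled `mod` here), so callers no longer thread a `*Sema` through. A minimal sketch of the migration, using only names that appear in these hunks:

    // before: resolution helpers are Sema methods
    try sema.resolveTypeFields(tuple_ty);
    try sema.resolveStructFieldInits(tuple_ty);

    // after: Type methods that take the Zcu
    try tuple_ty.resolveFields(mod);
    try tuple_ty.resolveStructFieldInits(mod);

Note also that the deleted `queueFullTypeResolution(array_ty)` call before `addBinOp(.array_elem_val, ...)` gets no replacement at this call site; the helper itself, and the `types_to_resolve` set it fed, are removed near the end of this file's diff.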
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); @@ -28658,14 +28589,14 @@ fn elemPtrArray( break :o index; } else null; - const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset); + const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, mod); if (maybe_undef_array_ptr_val) |array_ptr_val| { if (array_ptr_val.isUndef(mod)) { return mod.undefRef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.ptrElem(index, sema); + const elem_ptr = try array_ptr_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr.toIntern()); } } @@ -28710,19 +28641,19 @@ fn elemValSlice( if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_ty = try sema.elemPtrType(slice_ty, index); - const elem_ptr_val = try slice_val.ptrElem(index, sema); + const elem_ptr_ty = try slice_ty.elemPtrType(index, mod); + const elem_ptr_val = try slice_val.ptrElem(index, mod); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef(elem_val.toIntern()); } @@ -28735,13 +28666,12 @@ fn elemValSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| - try mod.intRef(Type.usize, try slice_val.sliceLen(sema)) + try mod.intRef(Type.usize, try slice_val.sliceLen(mod)) else try block.addTyOp(.slice_len, Type.usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } - try sema.queueFullTypeResolution(sema.typeOf(slice)); return block.addBinOp(.slice_elem_val, slice, elem_index); } @@ -28762,17 +28692,17 @@ fn elemPtrSlice( const maybe_undef_slice_val = try sema.resolveValue(slice); // The index must not be undefined since it can be out of bounds. 
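`Value` queries get the same treatment: `sliceLen` and `ptrElem` now take the `Zcu` instead of the `Sema`, and `toUnsignedIntAdvanced(sema)` becomes `toUnsignedIntSema(mod)`. The new call shapes, as they appear in the hunks above:

    const slice_len = try slice_val.sliceLen(mod);
    const elem_ptr_val = try slice_val.ptrElem(index, mod);
    const index: usize = @intCast(try index_val.toUnsignedIntSema(mod));

As with `.array_elem_val` above, the `queueFullTypeResolution` call before `addBinOp(.slice_elem_val, ...)` is simply dropped.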
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); break :o index; } else null; - const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset); + const elem_ptr_ty = try slice_ty.elemPtrType(offset, mod); if (maybe_undef_slice_val) |slice_val| { if (slice_val.isUndef(mod)) { return mod.undefRef(elem_ptr_ty); } - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -28782,7 +28712,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.ptrElem(index, sema); + const elem_ptr_val = try slice_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr_val.toIntern()); } } @@ -28795,7 +28725,7 @@ fn elemPtrSlice( const len_inst = len: { if (maybe_undef_slice_val) |slice_val| if (!slice_val.isUndef(mod)) - break :len try mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + break :len try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -28860,9 +28790,9 @@ fn coerceExtra( if (dest_ty.isGenericPoison()) return inst; const zcu = sema.mod; const dest_ty_src = inst_src; // TODO better source location - try sema.resolveTypeFields(dest_ty); + try dest_ty.resolveFields(zcu); const inst_ty = sema.typeOf(inst); - try sema.resolveTypeFields(inst_ty); + try inst_ty.resolveFields(zcu); const target = zcu.getTarget(); // If the types are the same, we can return the operand. if (dest_ty.eql(inst_ty, zcu)) @@ -28876,7 +28806,6 @@ fn coerceExtra( return sema.coerceInMemory(val, dest_ty); } try sema.requireRuntimeBlock(block, inst_src, null); - try sema.queueFullTypeResolution(dest_ty); const new_val = try block.addBitCast(dest_ty, inst); try sema.checkKnownAllocPtr(block, inst, new_val); return new_val; @@ -29172,7 +29101,7 @@ fn coerceExtra( // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. 
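In `coerceExtra`, both the destination and operand types now resolve their fields through the `Type` methods up front, and the runtime in-memory coercion path stops queueing full type resolution before emitting the bitcast. Presumably this is safe because layout resolution now happens eagerly where it is consumed; `bitCast` further down calls `dest_ty.resolveLayout(zcu)` directly. Sketch of the changed runtime path:

    // before: queue resolution, then emit the cast
    try sema.requireRuntimeBlock(block, inst_src, null);
    try sema.queueFullTypeResolution(dest_ty);
    const new_val = try block.addBitCast(dest_ty, inst);

    // after: no queued resolution
    try sema.requireRuntimeBlock(block, inst_src, null);
    const new_val = try block.addBitCast(dest_ty, inst);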
if (inst_child_ty.structFieldCount(zcu) == 0) { - const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, sema); + const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, .sema); return Air.internedToRef(try zcu.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), .ptr = try zcu.intern(.{ .ptr = .{ @@ -29317,7 +29246,7 @@ fn coerceExtra( } break :int; }; - const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, sema); + const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, .sema); // TODO implement this compile error //const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, zcu)) { @@ -30649,7 +30578,6 @@ fn storePtr2( } try sema.requireRuntimeBlock(block, src, runtime_src); - try sema.queueFullTypeResolution(elem_ty); if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) { const ptr_inst = ptr.toIndex().?; @@ -30871,10 +30799,10 @@ fn bitCast( operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { const zcu = sema.mod; - try sema.resolveTypeLayout(dest_ty); + try dest_ty.resolveLayout(zcu); const old_ty = sema.typeOf(inst); - try sema.resolveTypeLayout(old_ty); + try old_ty.resolveLayout(zcu); const dest_bits = dest_ty.bitSize(zcu); const old_bits = old_ty.bitSize(zcu); @@ -31056,7 +30984,7 @@ fn coerceEnumToUnion( const union_obj = mod.typeToUnion(union_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try sema.resolveTypeFields(field_ty); + try field_ty.resolveFields(mod); if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{}); @@ -31469,8 +31397,8 @@ fn coerceTupleToStruct( ) !Air.Inst.Ref { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(struct_ty); - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveFields(mod); + try struct_ty.resolveStructFieldInits(mod); if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); @@ -31817,7 +31745,7 @@ fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.Decl }); // TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type try sema.declareDependency(.{ .decl_val = decl_index }); - const ptr_ty = try sema.ptrType(.{ + const ptr_ty = try mod.ptrTypeSema(.{ .child = decl_val.typeOf(mod).toIntern(), .flags = .{ .alignment = owner_decl.alignment, @@ -31864,14 +31792,14 @@ fn analyzeRef( try sema.requireRuntimeBlock(block, src, null); const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try mod.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .is_const = true, .address_space = address_space, }, }); - const mut_ptr_type = try sema.ptrType(.{ + const mut_ptr_type = try mod.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .address_space = address_space }, }); @@ -31979,7 +31907,7 @@ fn analyzeSliceLen( if (slice_val.isUndef(mod)) { return mod.undefRef(Type.usize); } - return mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + return mod.intRef(Type.usize, try slice_val.sliceLen(mod)); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.slice_len, Type.usize, slice_inst); @@ -32347,7 +32275,7 @@ fn analyzeSlice( assert(manyptr_ty_key.flags.size == .One); manyptr_ty_key.child = elem_ty.toIntern(); manyptr_ty_key.flags.size = .Many; - break :ptr try 
sema.coerceCompatiblePtrs(block, try sema.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src); } else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); @@ -32416,7 +32344,7 @@ fn analyzeSlice( return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const len_plus_sent = slice_len + @intFromBool(has_sentinel); const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent); if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { @@ -32431,7 +32359,7 @@ fn analyzeSlice( "end index {} out of bounds for slice of length {d}{s}", .{ end_val.fmtValue(mod, sema), - try slice_val.sliceLen(sema), + try slice_val.sliceLen(mod), sentinel_label, }, ); @@ -32504,7 +32432,7 @@ fn analyzeSlice( const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty); - const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, sema); + const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, mod); const res = try sema.pointerDerefExtra(block, src, elem_ptr); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -32567,9 +32495,9 @@ fn analyzeSlice( const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = try new_len_val.toUnsignedIntAdvanced(sema); + const new_len_int = try new_len_val.toUnsignedIntSema(mod); - const return_ty = try sema.ptrType(.{ + const return_ty = try mod.ptrTypeSema(.{ .child = (try mod.arrayType(.{ .len = new_len_int, .sentinel = if (sentinel) |s| s.toIntern() else .none, @@ -32631,7 +32559,7 @@ fn analyzeSlice( return sema.fail(block, src, "non-zero length slice of undefined pointer", .{}); } - const return_ty = try sema.ptrType(.{ + const return_ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = if (sentinel) |s| s.toIntern() else .none, .flags = .{ @@ -32659,7 +32587,7 @@ fn analyzeSlice( if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the // underlying value data includes the sentinel - break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); @@ -32751,7 +32679,7 @@ fn cmpNumeric( if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false; } - return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) + return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, .sema)) .bool_true else .bool_false; @@ -32820,11 +32748,11 @@ fn cmpNumeric( // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, // add/subtract 1. 
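A second migration runs alongside the `Sema`-to-`Type` one: the `*Advanced` helpers stop taking a `*Sema` (or `?*Sema`) and take a strategy tag instead, so `compareHeteroAdvanced(..., mod, sema)` becomes `compareHeteroAdvanced(..., mod, .sema)`, and likewise for `bitSizeAdvanced`, `ptrAlignmentAdvanced`, and `floatFromIntAdvanced` above. The tags belong to the `ResolveStrat`/`ResolveStratLazy` types used by the `src/Type.zig` hunks at the end of this diff; their definitions are not shown here, but the switch arms there use `.sema`, `.eager`, and `.lazy` (plus `.normal` for `ResolveStrat`). The call-site change is mechanical:

    // before: the helper is handed the live Sema to resolve types on demand
    _ = try union_ty.bitSizeAdvanced(zcu, sema);
    // after: a tag selects the resolution strategy
    _ = try union_ty.bitSizeAdvanced(zcu, .sema);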
const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| - !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) + !(try lhs_val.compareAllWithZeroSema(.gte, mod)) else (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| - !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema)) + !(try rhs_val.compareAllWithZeroSema(.gte, mod)) else (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; @@ -32972,7 +32900,7 @@ fn compareIntsOnlyPossibleResult( ) Allocator.Error!?bool { const mod = sema.mod; const rhs_info = rhs_ty.intInfo(mod); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable; + const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, .sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -33136,7 +33064,6 @@ fn wrapErrorUnionPayload( } }))); } try sema.requireRuntimeBlock(block, inst_src, null); - try sema.queueFullTypeResolution(dest_payload_ty); return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced); } @@ -33939,7 +33866,7 @@ fn resolvePeerTypesInner( opt_ptr_info = ptr_info; } - return .{ .success = try sema.ptrType(opt_ptr_info.?) }; + return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; }, .ptr => { @@ -34249,7 +34176,7 @@ fn resolvePeerTypesInner( }, } - return .{ .success = try sema.ptrType(opt_ptr_info.?) }; + return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; }, .func => { @@ -34606,7 +34533,7 @@ fn resolvePeerTypesInner( var comptime_val: ?Value = null; for (peer_tys) |opt_ty| { const struct_ty = opt_ty orelse continue; - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_index) orelse { comptime_val = null; @@ -34742,181 +34669,22 @@ pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { const ip = &mod.intern_pool; const fn_ty_info = mod.typeToFunc(fn_ty).?; - try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.return_type)); + try Type.fromInterned(fn_ty_info.return_type).resolveFully(mod); if (mod.comp.config.any_error_tracing and Type.fromInterned(fn_ty_info.return_type).isError(mod)) { // Ensure the type exists so that backends can assume that. - _ = try sema.getBuiltinType("StackTrace"); + _ = try mod.getBuiltinType("StackTrace"); } for (0..fn_ty_info.param_types.len) |i| { - try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.param_types.get(ip)[i])); + try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(mod); } } -/// Make it so that calling hash() and eql() on `val` will not assert due -/// to a type not having its layout resolved. 
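The entire lazy-value walk that follows is deleted from `Sema`. As the one-line replacement at the end of the deletion shows, the function now just delegates, so the traversal logic presumably lives on `Value` as of this commit:

    fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
        return val.resolveLazy(sema.arena, sema.mod);
    }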
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { - const mod = sema.mod; - switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| switch (int.storage) { - .u64, .i64, .big_int => return val, - .lazy_align, .lazy_size => return mod.intValue( - Type.fromInterned(int.ty), - (try val.getUnsignedIntAdvanced(mod, sema)).?, - ), - }, - .slice => |slice| { - const ptr = try sema.resolveLazyValue(Value.fromInterned(slice.ptr)); - const len = try sema.resolveLazyValue(Value.fromInterned(slice.len)); - if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val; - return Value.fromInterned(try mod.intern(.{ .slice = .{ - .ty = slice.ty, - .ptr = ptr.toIntern(), - .len = len.toIntern(), - } })); - }, - .ptr => |ptr| { - switch (ptr.base_addr) { - .decl, .comptime_alloc, .anon_decl, .int => return val, - .comptime_field => |field_val| { - const resolved_field_val = - (try sema.resolveLazyValue(Value.fromInterned(field_val))).toIntern(); - return if (resolved_field_val == field_val) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = .{ .comptime_field = resolved_field_val }, - .byte_offset = ptr.byte_offset, - } }))); - }, - .eu_payload, .opt_payload => |base| { - const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base))).toIntern(); - return if (resolved_base == base) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = switch (ptr.base_addr) { - .eu_payload => .{ .eu_payload = resolved_base }, - .opt_payload => .{ .opt_payload = resolved_base }, - else => unreachable, - }, - .byte_offset = ptr.byte_offset, - } }))); - }, - .arr_elem, .field => |base_index| { - const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base_index.base))).toIntern(); - return if (resolved_base == base_index.base) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = switch (ptr.base_addr) { - .arr_elem => .{ .arr_elem = .{ - .base = resolved_base, - .index = base_index.index, - } }, - .field => .{ .field = .{ - .base = resolved_base, - .index = base_index.index, - } }, - else => unreachable, - }, - .byte_offset = ptr.byte_offset, - } }))); - }, - } - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => return val, - .elems => |elems| { - var resolved_elems: []InternPool.Index = &.{}; - for (elems, 0..) 
|elem, i| { - const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern(); - if (resolved_elems.len == 0 and resolved_elem != elem) { - resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len); - @memcpy(resolved_elems[0..i], elems[0..i]); - } - if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; - } - return if (resolved_elems.len == 0) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = aggregate.ty, - .storage = .{ .elems = resolved_elems }, - } }))); - }, - .repeated_elem => |elem| { - const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern(); - return if (resolved_elem == elem) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = aggregate.ty, - .storage = .{ .repeated_elem = resolved_elem }, - } }))); - }, - }, - .un => |un| { - const resolved_tag = if (un.tag == .none) - .none - else - (try sema.resolveLazyValue(Value.fromInterned(un.tag))).toIntern(); - const resolved_val = (try sema.resolveLazyValue(Value.fromInterned(un.val))).toIntern(); - return if (resolved_tag == un.tag and resolved_val == un.val) - val - else - Value.fromInterned((try mod.intern(.{ .un = .{ - .ty = un.ty, - .tag = resolved_tag, - .val = resolved_val, - } }))); - }, - else => return val, - } -} - -pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .simple_type => |simple_type| return sema.resolveSimpleType(simple_type), - else => {}, - } - switch (ty.zigTypeTag(mod)) { - .Struct => return sema.resolveStructLayout(ty), - .Union => return sema.resolveUnionLayout(ty), - .Array => { - if (ty.arrayLenIncludingSentinel(mod) == 0) return; - const elem_ty = ty.childType(mod); - return sema.resolveTypeLayout(elem_ty); - }, - .Optional => { - const payload_ty = ty.optionalChild(mod); - // In case of querying the ABI alignment of this optional, we will ask - // for hasRuntimeBits() of the payload type, so we need "requires comptime" - // to be known already before this function returns. - _ = try sema.typeRequiresComptime(payload_ty); - return sema.resolveTypeLayout(payload_ty); - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - return sema.resolveTypeLayout(payload_ty); - }, - .Fn => { - const info = mod.typeToFunc(ty).?; - if (info.is_generic) { - // Resolving of generic function types is deferred to when - // the function is instantiated. - return; - } - const ip = &mod.intern_pool; - for (0..info.param_types.len) |i| { - const param_ty = info.param_types.get(ip)[i]; - try sema.resolveTypeLayout(Type.fromInterned(param_ty)); - } - try sema.resolveTypeLayout(Type.fromInterned(info.return_type)); - }, - else => {}, - } + return val.resolveLazy(sema.arena, sema.mod); } /// Resolve a struct's alignment only without triggering resolution of its layout. 
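Two signature changes appear in the next hunks and repeat for the union counterparts further down. First, the resolution entry points narrow their error set from `CompileError` to `SemaError` and gain an assertion that the current analysis unit owns the type being resolved. Second, `resolveStructAlignment` no longer returns the computed alignment: it records the result in the InternPool flags and returns void, so a caller would presumably re-read it afterwards, along these lines:

    // hypothetical caller; the getter path is not part of this diff
    try sema.resolveStructAlignment(ty.toIntern(), struct_type);
    const alignment = struct_type.flagsPtr(ip).alignment;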
@@ -34925,11 +34693,13 @@ pub fn resolveStructAlignment( sema: *Sema, ty: InternPool.Index, struct_type: InternPool.LoadedStructType, -) CompileError!Alignment { +) SemaError!void { const mod = sema.mod; const ip = &mod.intern_pool; const target = mod.getTarget(); + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + assert(struct_type.flagsPtr(ip).alignment == .none); assert(struct_type.layout != .@"packed"); @@ -34940,7 +34710,7 @@ pub fn resolveStructAlignment( struct_type.flagsPtr(ip).assumed_pointer_aligned = true; const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); struct_type.flagsPtr(ip).alignment = result; - return result; + return; } try sema.resolveTypeFieldsStruct(ty, struct_type); @@ -34952,7 +34722,7 @@ pub fn resolveStructAlignment( struct_type.flagsPtr(ip).assumed_pointer_aligned = true; const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); struct_type.flagsPtr(ip).alignment = result; - return result; + return; } defer struct_type.clearAlignmentWip(ip); @@ -34962,30 +34732,35 @@ pub fn resolveStructAlignment( const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) continue; - const field_align = try sema.structFieldAlignment( + const field_align = try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, + .sema, ); result = result.maxStrict(field_align); } struct_type.flagsPtr(ip).alignment = result; - return result; } -fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + if (struct_type.haveLayout(ip)) return; - try sema.resolveTypeFields(ty); + try ty.resolveFields(zcu); if (struct_type.layout == .@"packed") { - try semaBackingIntType(zcu, struct_type); + semaBackingIntType(zcu, struct_type) catch |err| switch (err) { + error.OutOfMemory, error.AnalysisFail => |e| return e, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, + }; return; } @@ -35021,10 +34796,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { }, else => return err, }; - field_align.* = try sema.structFieldAlignment( + field_align.* = try zcu.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, + .sema, ); big_align = big_align.maxStrict(field_align.*); } @@ -35160,7 +34936,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co var accumulator: u64 = 0; for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - accumulator += try field_ty.bitSizeAdvanced(mod, &sema); + accumulator += try field_ty.bitSizeAdvanced(mod, .sema); } break :blk accumulator; }; @@ -35270,11 +35046,13 @@ pub fn resolveUnionAlignment( sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType, -) CompileError!Alignment { +) SemaError!void { const mod = sema.mod; const ip = &mod.intern_pool; const target = mod.getTarget(); + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + assert(!union_type.haveLayout(ip)); if (union_type.flagsPtr(ip).status == .field_types_wip) { @@ -35284,7 +35062,7 @@ pub fn resolveUnionAlignment( union_type.flagsPtr(ip).assumed_pointer_aligned = true; const 
result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); union_type.flagsPtr(ip).alignment = result; - return result; + return; } try sema.resolveTypeFieldsUnion(ty, union_type); @@ -35304,11 +35082,10 @@ pub fn resolveUnionAlignment( } union_type.flagsPtr(ip).alignment = max_align; - return max_align; } /// This logic must be kept in sync with `Module.getUnionLayout`. -fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; @@ -35317,6 +35094,8 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { // Load again, since the tag type might have changed due to resolution. const union_type = ip.loadUnionType(ty.ip_index); + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + switch (union_type.flagsPtr(ip).status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { @@ -35425,53 +35204,15 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { /// Returns `error.AnalysisFail` if any of the types (recursively) failed to /// be resolved. -pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; - switch (ty.zigTypeTag(mod)) { - .Pointer => { - return sema.resolveTypeFully(ty.childType(mod)); - }, - .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .struct_type => try sema.resolveStructFully(ty), - .anon_struct_type => |tuple| { - for (tuple.types.get(ip)) |field_ty| { - try sema.resolveTypeFully(Type.fromInterned(field_ty)); - } - }, - .simple_type => |simple_type| try sema.resolveSimpleType(simple_type), - else => {}, - }, - .Union => return sema.resolveUnionFully(ty), - .Array => return sema.resolveTypeFully(ty.childType(mod)), - .Optional => { - return sema.resolveTypeFully(ty.optionalChild(mod)); - }, - .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)), - .Fn => { - const info = mod.typeToFunc(ty).?; - if (info.is_generic) { - // Resolving of generic function types is deferred to when - // the function is instantiated. 
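The deletion continuing here removes `resolveTypeFully` from `Sema` entirely; per the surrounding hunks, the recursive walk becomes `Type.resolveFully`, matching the `resolveFields`/`resolveLayout` moves above, while `resolveStructFully` and `resolveUnionFully` stay behind but become `pub` with the narrower `SemaError` set. The call-site change is the same mechanical rewrite as before:

    // before:
    try sema.resolveTypeFully(field_ty);
    // after:
    try field_ty.resolveFully(mod);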
- return; - } - for (0..info.param_types.len) |i| { - const param_ty = info.param_types.get(ip)[i]; - try sema.resolveTypeFully(Type.fromInterned(param_ty)); - } - try sema.resolveTypeFully(Type.fromInterned(info.return_type)); - }, - else => {}, - } -} - -fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveStructLayout(ty); const mod = sema.mod; const ip = &mod.intern_pool; const struct_type = mod.typeToStruct(ty).?; + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + if (struct_type.setFullyResolved(ip)) return; errdefer struct_type.clearFullyResolved(ip); @@ -35481,16 +35222,19 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - try sema.resolveTypeFully(field_ty); + try field_ty.resolveFully(mod); } } -fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveUnionLayout(ty); const mod = sema.mod; const ip = &mod.intern_pool; const union_obj = mod.typeToUnion(ty).?; + + assert(sema.ownerUnit().unwrap().decl == union_obj.decl); + switch (union_obj.flagsPtr(ip).status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, .fully_resolved_wip, .fully_resolved => return, @@ -35506,7 +35250,7 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { union_obj.flagsPtr(ip).status = .fully_resolved_wip; for (0..union_obj.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try sema.resolveTypeFully(field_ty); + try field_ty.resolveFully(mod); } union_obj.flagsPtr(ip).status = .fully_resolved; } @@ -35515,135 +35259,18 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { _ = try sema.typeRequiresComptime(ty); } -pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; - const ty_ip = ty.toIntern(); - - switch (ty_ip) { - .none => unreachable, - - .u0_type, - .i0_type, - .u1_type, - .u8_type, - .i8_type, - .u16_type, - .i16_type, - .u29_type, - .u32_type, - .i32_type, - .u64_type, - .i64_type, - .u80_type, - .u128_type, - .i128_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - .c_longdouble_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .anyopaque_type, - .bool_type, - .void_type, - .type_type, - .anyerror_type, - .adhoc_inferred_error_set_type, - .comptime_int_type, - .comptime_float_type, - .noreturn_type, - .anyframe_type, - .null_type, - .undefined_type, - .enum_literal_type, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, - .single_const_pointer_to_comptime_int_type, - .slice_const_u8_type, - .slice_const_u8_sentinel_0_type, - .optional_noreturn_type, - .anyerror_void_error_union_type, - .generic_poison_type, - .empty_struct_type, - => {}, - - .undef => unreachable, - .zero => unreachable, - .zero_usize => unreachable, - .zero_u8 => unreachable, - .one => unreachable, - .one_usize => unreachable, - .one_u8 => unreachable, - .four_u8 => unreachable, - .negative_one => unreachable, - .calling_convention_c => unreachable, - .calling_convention_inline => unreachable, - .void_value => 
unreachable, - .unreachable_value => unreachable, - .null_value => unreachable, - .bool_true => unreachable, - .bool_false => unreachable, - .empty_struct => unreachable, - .generic_poison => unreachable, - - else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { - .type_struct, - .type_struct_packed, - .type_struct_packed_inits, - => try sema.resolveTypeFieldsStruct(ty_ip, ip.loadStructType(ty_ip)), - - .type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.loadUnionType(ty_ip)), - .simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type), - else => {}, - }, - } -} - -/// Fully resolves a simple type. This is usually a nop, but for builtin types with -/// special InternPool indices (such as std.builtin.Type) it will analyze and fully -/// resolve the container type. -fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileError!void { - const builtin_type_name: []const u8 = switch (simple_type) { - .atomic_order => "AtomicOrder", - .atomic_rmw_op => "AtomicRmwOp", - .calling_convention => "CallingConvention", - .address_space => "AddressSpace", - .float_mode => "FloatMode", - .reduce_op => "ReduceOp", - .call_modifier => "CallModifer", - .prefetch_options => "PrefetchOptions", - .export_options => "ExportOptions", - .extern_options => "ExternOptions", - .type_info => "Type", - else => return, - }; - // This will fully resolve the type. - _ = try sema.getBuiltinType(builtin_type_name); -} - pub fn resolveTypeFieldsStruct( sema: *Sema, ty: InternPool.Index, struct_type: InternPool.LoadedStructType, -) CompileError!void { +) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; // If there is no owner decl it means the struct has no fields. const owner_decl = struct_type.decl.unwrap() orelse return; + assert(sema.ownerUnit().unwrap().decl == owner_decl); + switch (zcu.declPtr(owner_decl).analysis) { .file_failure, .dependency_failure, @@ -35674,16 +35301,19 @@ pub fn resolveTypeFieldsStruct( } return error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; } -pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; const owner_decl = struct_type.decl.unwrap() orelse return; + assert(sema.ownerUnit().unwrap().decl == owner_decl); + // Inits can start as resolved if (struct_type.haveFieldInits(ip)) return; @@ -35706,15 +35336,19 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void { } return error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; struct_type.setHaveFieldInits(ip); } -pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) CompileError!void { +pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const owner_decl = zcu.declPtr(union_type.decl); + + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + switch (owner_decl.analysis) { .file_failure, .dependency_failure, @@ -35752,7 +35386,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load } return 
error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; union_type.flagsPtr(ip).status = .have_field_types; } @@ -36801,106 +36436,6 @@ fn generateUnionTagTypeSimple( return enum_ty; } -fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { - const zcu = sema.mod; - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = sema.owner_decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: { - assert(sema.owner_decl.has_tv); - assert(sema.owner_decl.owns_tv); - switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) { - .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?, - .Fn => { - const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl); - break :owner generic_owner_decl.zir_decl_index.unwrap().?; - }, - else => unreachable, - } - }, - .type_name_ctx = sema.owner_decl.name, - }; - defer block.instructions.deinit(sema.gpa); - - const src = block.nodeOffset(0); - - const decl_index = try getBuiltinDecl(sema, &block, name); - return sema.analyzeDeclVal(&block, src, decl_index); -} - -fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!InternPool.DeclIndex { - const gpa = sema.gpa; - - const src = block.nodeOffset(0); - - const mod = sema.mod; - const ip = &mod.intern_pool; - const std_mod = mod.std_mod; - const std_file = (mod.importPkg(std_mod) catch unreachable).file; - const opt_builtin_inst = (try sema.namespaceLookupRef( - block, - src, - mod.declPtr(std_file.root_decl.unwrap().?).src_namespace.toOptional(), - try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls), - )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); - const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst, src); - const builtin_ty = sema.analyzeAsType(block, src, builtin_inst) catch |err| switch (err) { - error.AnalysisFail => std.debug.panic("std.builtin is corrupt", .{}), - else => |e| return e, - }; - const decl_index = (try sema.namespaceLookup( - block, - src, - builtin_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, name, .no_embedded_nulls), - )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); - return decl_index; -} - -fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { - const zcu = sema.mod; - const ty_inst = try sema.getBuiltin(name); - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = sema.owner_decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: { - assert(sema.owner_decl.has_tv); - assert(sema.owner_decl.owns_tv); - switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) { - .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?, - .Fn => { - const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl); - break :owner generic_owner_decl.zir_decl_index.unwrap().?; - }, - else => unreachable, - } - }, - .type_name_ctx = sema.owner_decl.name, - }; - defer block.instructions.deinit(sema.gpa); - - const src = block.nodeOffset(0); - - const result_ty = sema.analyzeAsType(&block, src, ty_inst) catch |err| 
switch (err) { - error.AnalysisFail => std.debug.panic("std.builtin.{s} is corrupt", .{name}), - else => |e| return e, - }; - try sema.resolveTypeFully(result_ty); // Should not fail - return result_ty; -} - /// There is another implementation of this in `Type.onePossibleValue`. This one /// in `Sema` is for calling during semantic analysis, and performs field resolution /// to get the answer. The one in `Type` is for calling during codegen and asserts @@ -37104,8 +36639,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .struct_type => { + // Resolving the layout first helps to avoid loops. + // If the type has a coherent layout, we can recurse through fields safely. + try ty.resolveLayout(zcu); + const struct_type = ip.loadStructType(ty.toIntern()); - try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); if (struct_type.field_types.len == 0) { // In this case the struct has no fields at all and @@ -37122,20 +36660,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { ); for (field_vals, 0..) |*field_val, i| { if (struct_type.fieldIsComptime(ip, i)) { - try sema.resolveStructFieldInits(ty); + try ty.resolveStructFieldInits(zcu); field_val.* = struct_type.field_inits.get(ip)[i]; continue; } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (field_ty.eql(ty, zcu)) { - const msg = try sema.errMsg( - ty.srcLoc(zcu), - "struct '{}' depends on itself", - .{ty.fmt(zcu)}, - ); - try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| { field_val.* = field_opv.toIntern(); } else return null; @@ -37163,8 +36692,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .union_type => { + // Resolving the layout first helps to avoid loops. + // If the type has a coherent layout, we can recurse through fields safely. + try ty.resolveLayout(zcu); + const union_obj = ip.loadUnionType(ty.toIntern()); - try sema.resolveTypeFieldsUnion(ty, union_obj); const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse return null; if (union_obj.field_types.len == 0) { @@ -37172,15 +36704,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return Value.fromInterned(only); } const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); - if (only_field_ty.eql(ty, zcu)) { - const msg = try sema.errMsg( - ty.srcLoc(zcu), - "union '{}' depends on itself", - .{ty.fmt(zcu)}, - ); - try sema.addFieldErrNote(ty, 0, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse return null; const only = try zcu.intern(.{ .un = .{ @@ -37298,7 +36821,7 @@ fn analyzeComptimeAlloc( // Needed to make an anon decl with type `var_type` (the `finish()` call below). _ = try sema.typeHasOnePossibleValue(var_type); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try mod.ptrTypeSema(.{ .child = var_type.toIntern(), .flags = .{ .alignment = alignment, @@ -37485,64 +37008,28 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { /// `generic_poison` will return false. /// May return false negatives when structs and unions are having their field types resolved. 
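The `typeHasOnePossibleValue` hunks above change strategy rather than just plumbing: per the new comments, resolving the layout before walking fields means a self-referential struct or union can no longer reach the field loop, so the hand-rolled "depends on itself" diagnostics are deleted; presumably layout resolution already fails on such cycles. In sketch form:

    // before: resolve fields, then detect the cycle per field
    try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
    // ... field loop errors with "struct '{}' depends on itself"

    // after: layout resolution up front handles the cycle case
    try ty.resolveLayout(zcu);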
-pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { - return ty.comptimeOnlyAdvanced(sema.mod, sema); +pub fn typeRequiresComptime(sema: *Sema, ty: Type) SemaError!bool { + return ty.comptimeOnlyAdvanced(sema.mod, .sema); } -pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - const mod = sema.mod; - return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) { +pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) SemaError!bool { + return ty.hasRuntimeBitsAdvanced(sema.mod, false, .sema) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; } -pub fn typeAbiSize(sema: *Sema, ty: Type) !u64 { - try sema.resolveTypeLayout(ty); +pub fn typeAbiSize(sema: *Sema, ty: Type) SemaError!u64 { + try ty.resolveLayout(sema.mod); return ty.abiSize(sema.mod); } -pub fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment { - return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar; -} - -/// Not valid to call for packed unions. -/// Keep implementation in sync with `Module.unionFieldNormalAlignment`. -pub fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index: u32) !Alignment { - const mod = sema.mod; - const ip = &mod.intern_pool; - const field_align = u.fieldAlign(ip, field_index); - if (field_align != .none) return field_align; - const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]); - if (field_ty.isNoReturn(sema.mod)) return .none; - return sema.typeAbiAlignment(field_ty); -} - -/// Keep implementation in sync with `Module.structFieldAlignment`. -pub fn structFieldAlignment( - sema: *Sema, - explicit_alignment: InternPool.Alignment, - field_ty: Type, - layout: std.builtin.Type.ContainerLayout, -) !Alignment { - if (explicit_alignment != .none) - return explicit_alignment; - const mod = sema.mod; - switch (layout) { - .@"packed" => return .none, - .auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty), - .@"extern" => {}, - } - // extern - const ty_abi_align = try sema.typeAbiAlignment(field_ty); - if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) { - return ty_abi_align.maxStrict(.@"16"); - } - return ty_abi_align; +pub fn typeAbiAlignment(sema: *Sema, ty: Type) SemaError!Alignment { + return (try ty.abiAlignmentAdvanced(sema.mod, .sema)).scalar; } pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - return ty.fnHasRuntimeBitsAdvanced(sema.mod, sema); + return ty.fnHasRuntimeBitsAdvanced(sema.mod, .sema); } fn unionFieldIndex( @@ -37554,7 +37041,7 @@ fn unionFieldIndex( ) !u32 { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_obj = mod.typeToUnion(union_ty).?; const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name); @@ -37570,7 +37057,7 @@ fn structFieldIndex( ) !u32 { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { @@ -37601,10 +37088,6 @@ fn anonStructFieldIndex( }); } -fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { - try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {}); -} - /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting 
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar). fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { @@ -37662,8 +37145,8 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -37752,8 +37235,8 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -37836,8 +37319,8 @@ fn intSubWithOverflowScalar( var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -38024,7 +37507,7 @@ fn intFitsInType( fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { const mod = sema.mod; - if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false; + if (!(try int_val.compareAllWithZeroSema(.gte, mod))) return false; const end_val = try mod.intValue(tag_ty, end); if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false; return true; @@ -38094,8 +37577,8 @@ fn intAddWithOverflowScalar( var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -38149,7 +37632,7 @@ fn compareScalar( switch (op) { .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), - else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, sema), + else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, .sema), } } @@ -38185,80 +37668,6 @@ fn compareVector( } }))); } -/// Returns the type of a pointer to an element. -/// Asserts that the type is a pointer, and that the element type is indexable. -/// If the element index is comptime-known, it must be passed in `offset`. 
-/// For *@Vector(n, T), return *align(a:b:h:v) T -/// For *[N]T, return *T -/// For [*]T, returns *T -/// For []T, returns *T -/// Handles const-ness and address spaces in particular. -/// This code is duplicated in `analyzePtrArithmetic`. -pub fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { - const mod = sema.mod; - const ptr_info = ptr_ty.ptrInfo(mod); - const elem_ty = ptr_ty.elemType2(mod); - const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0; - const parent_ty = ptr_ty.childType(mod); - - const VI = InternPool.Key.PtrType.VectorIndex; - - const vector_info: struct { - host_size: u16 = 0, - alignment: Alignment = .none, - vector_index: VI = .none, - } = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: { - const elem_bits = elem_ty.bitSize(mod); - if (elem_bits == 0) break :blk .{}; - const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); - if (!is_packed) break :blk .{}; - - break :blk .{ - .host_size = @intCast(parent_ty.arrayLen(mod)), - .alignment = parent_ty.abiAlignment(mod), - .vector_index = if (offset) |some| @enumFromInt(some) else .runtime, - }; - } else .{}; - - const alignment: Alignment = a: { - // Calculate the new pointer alignment. - if (ptr_info.flags.alignment == .none) { - // In case of an ABI-aligned pointer, any pointer arithmetic - // maintains the same ABI-alignedness. - break :a vector_info.alignment; - } - // If the addend is not a comptime-known value we can still count on - // it being a multiple of the type size. - const elem_size = try sema.typeAbiSize(elem_ty); - const addend = if (offset) |off| elem_size * off else elem_size; - - // The resulting pointer is aligned to the lcd between the offset (an - // arbitrary number) and the alignment factor (always a power of two, - // non zero). - const new_align: Alignment = @enumFromInt(@min( - @ctz(addend), - ptr_info.flags.alignment.toLog2Units(), - )); - assert(new_align != .none); - break :a new_align; - }; - return sema.ptrType(.{ - .child = elem_ty.toIntern(), - .flags = .{ - .alignment = alignment, - .is_const = ptr_info.flags.is_const, - .is_volatile = ptr_info.flags.is_volatile, - .is_allowzero = is_allowzero, - .address_space = ptr_info.flags.address_space, - .vector_index = vector_info.vector_index, - }, - .packed_offset = .{ - .host_size = vector_info.host_size, - .bit_offset = 0, - }, - }); -} - /// Merge lhs with rhs. /// Asserts that lhs and rhs are both error sets and are resolved. 
fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { @@ -38299,13 +37708,6 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool return sema.typeOf(ref).zigTypeTag(sema.mod) == tag; } -pub fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type { - if (info.flags.alignment != .none) { - _ = try sema.typeAbiAlignment(Type.fromInterned(info.child)); - } - return sema.mod.ptrType(info); -} - pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { if (!sema.mod.comp.debug_incremental) return; @@ -38425,12 +37827,12 @@ fn maybeDerefSliceAsArray( else => unreachable, }; const elem_ty = Type.fromInterned(slice.ty).childType(zcu); - const len = try Value.fromInterned(slice.len).toUnsignedIntAdvanced(sema); + const len = try Value.fromInterned(slice.len).toUnsignedIntSema(zcu); const array_ty = try zcu.arrayType(.{ .child = elem_ty.toIntern(), .len = len, }); - const ptr_ty = try sema.ptrType(p: { + const ptr_ty = try zcu.ptrTypeSema(p: { var p = Type.fromInterned(slice.ty).ptrInfo(zcu); p.flags.size = .One; p.child = array_ty.toIntern(); diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index 9536ee33cd..3c3ccdbfaa 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -78,8 +78,8 @@ fn bitCastInner( const val_ty = val.typeOf(zcu); - try sema.resolveTypeLayout(val_ty); - try sema.resolveTypeLayout(dest_ty); + try val_ty.resolveLayout(zcu); + try dest_ty.resolveLayout(zcu); assert(val_ty.hasWellDefinedLayout(zcu)); @@ -136,8 +136,8 @@ fn bitCastSpliceInner( const val_ty = val.typeOf(zcu); const splice_val_ty = splice_val.typeOf(zcu); - try sema.resolveTypeLayout(val_ty); - try sema.resolveTypeLayout(splice_val_ty); + try val_ty.resolveLayout(zcu); + try splice_val_ty.resolveLayout(zcu); const splice_bits = splice_val_ty.bitSize(zcu); diff --git a/src/Type.zig b/src/Type.zig index 96c3e055fd..9f11a70bf3 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -5,6 +5,7 @@ const std = @import("std"); const builtin = @import("builtin"); +const Allocator = std.mem.Allocator; const Value = @import("Value.zig"); const assert = std.debug.assert; const Target = std.Target; @@ -18,6 +19,7 @@ const InternPool = @import("InternPool.zig"); const Alignment = InternPool.Alignment; const Zir = std.zig.Zir; const Type = @This(); +const SemaError = Zcu.SemaError; ip_index: InternPool.Index, @@ -458,7 +460,7 @@ pub fn toValue(self: Type) Value { return Value.fromInterned(self.toIntern()); } -const RuntimeBitsError = Module.CompileError || error{NeedLazy}; +const RuntimeBitsError = SemaError || error{NeedLazy}; /// true if and only if the type takes up space in memory at runtime. /// There are two reasons a type will return false: @@ -475,7 +477,7 @@ pub fn hasRuntimeBitsAdvanced( ty: Type, mod: *Module, ignore_comptime_only: bool, - strat: AbiAlignmentAdvancedStrat, + strat: ResolveStratLazy, ) RuntimeBitsError!bool { const ip = &mod.intern_pool; return switch (ty.toIntern()) { @@ -488,8 +490,8 @@ pub fn hasRuntimeBitsAdvanced( // to comptime-only types do not, with the exception of function pointers. 
if (ignore_comptime_only) return true; return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(ty)), - .eager => !comptimeOnly(ty, mod), + .sema => !try ty.comptimeOnlyAdvanced(mod, .sema), + .eager => !ty.comptimeOnly(mod), .lazy => error.NeedLazy, }; }, @@ -506,8 +508,8 @@ pub fn hasRuntimeBitsAdvanced( } if (ignore_comptime_only) return true; return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(child_ty)), - .eager => !comptimeOnly(child_ty, mod), + .sema => !try child_ty.comptimeOnlyAdvanced(mod, .sema), + .eager => !child_ty.comptimeOnly(mod), .lazy => error.NeedLazy, }; }, @@ -578,7 +580,7 @@ pub fn hasRuntimeBitsAdvanced( return true; } switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), + .sema => try ty.resolveFields(mod), .eager => assert(struct_type.haveFieldTypes(ip)), .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy, } @@ -622,7 +624,7 @@ pub fn hasRuntimeBitsAdvanced( }, } switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), + .sema => try ty.resolveFields(mod), .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()), .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes()) return error.NeedLazy, @@ -784,19 +786,18 @@ pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { } pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool { - return ty.fnHasRuntimeBitsAdvanced(mod, null) catch unreachable; + return ty.fnHasRuntimeBitsAdvanced(mod, .normal) catch unreachable; } /// Determines whether a function type has runtime bits, i.e. whether a /// function with this type can exist at runtime. /// Asserts that `ty` is a function type. -/// If `opt_sema` is not provided, asserts that the return type is sufficiently resolved. -pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { +pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool { const fn_info = mod.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; if (fn_info.cc == .Inline) return false; - return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema); + return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, strat); } pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { @@ -820,23 +821,23 @@ pub fn isNoReturn(ty: Type, mod: *Module) bool { /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit. 
 pub fn ptrAlignment(ty: Type, mod: *Module) Alignment {
-    return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
+    return ptrAlignmentAdvanced(ty, mod, .normal) catch unreachable;
 }
 
-pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment {
+pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) !Alignment {
     return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
         .ptr_type => |ptr_type| {
             if (ptr_type.flags.alignment != .none)
                 return ptr_type.flags.alignment;
 
-            if (opt_sema) |sema| {
-                const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema });
+            if (strat == .sema) {
+                const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .sema);
                 return res.scalar;
             }
 
             return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
         },
-        .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema),
+        .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, strat),
         else => unreachable,
     };
 }
@@ -868,10 +869,34 @@ pub const AbiAlignmentAdvanced = union(enum) {
     val: Value,
 };
 
-pub const AbiAlignmentAdvancedStrat = union(enum) {
-    eager,
+pub const ResolveStratLazy = enum {
+    /// Return a `lazy_size` or `lazy_align` value if necessary.
+    /// This value can be resolved later using `Value.resolveLazy`.
     lazy,
-    sema: *Sema,
+    /// Return a scalar result, expecting all necessary type resolution to be completed.
+    /// Backends should typically use this, since they must not perform type resolution.
+    eager,
+    /// Return a scalar result, performing type resolution as necessary.
+    /// This should typically be used from semantic analysis.
+    sema,
+};
+
+/// The chosen strategy can be easily optimized away in release builds.
+/// However, in debug builds, it helps to avoid accidentally resolving types in backends.
+pub const ResolveStrat = enum {
+    /// Assert that all necessary resolution is completed.
+    /// Backends should typically use this, since they must not perform type resolution.
+    normal,
+    /// Perform type resolution as necessary using `Zcu`.
+    /// This should typically be used from semantic analysis.
+    sema,
+
+    pub fn toLazy(strat: ResolveStrat) ResolveStratLazy {
+        return switch (strat) {
+            .normal => .eager,
+            .sema => .sema,
+        };
+    }
 };
 
 /// If you pass `eager` you will get back `scalar` and assert the type is resolved.
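As a side note on the two enums introduced above (illustrative, not part of the patch): `.normal` and `.eager` assert that any needed type resolution already happened, `.sema` is allowed to trigger it, and `toLazy` is how a caller holding a non-lazy `ResolveStrat` feeds APIs that additionally accept `.lazy`. A minimal self-contained sketch of that mapping, with the surrounding compiler machinery omitted (`zig test`-runnable; the test name is made up):

    const std = @import("std");

    const ResolveStratLazy = enum { lazy, eager, sema };

    const ResolveStrat = enum {
        normal,
        sema,

        fn toLazy(strat: ResolveStrat) ResolveStratLazy {
            // `.normal` promises resolution is already done, so it degrades
            // to `.eager`; `.sema` keeps its license to resolve on demand.
            return switch (strat) {
                .normal => .eager,
                .sema => .sema,
            };
        }
    };

    test "toLazy never produces .lazy" {
        try std.testing.expectEqual(ResolveStratLazy.eager, ResolveStrat.normal.toLazy());
        try std.testing.expectEqual(ResolveStratLazy.sema, ResolveStrat.sema.toLazy());
    }

Only callers that explicitly want a `lazy_align`/`lazy_size` placeholder pass `.lazy` directly; `toLazy` can never produce it.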
@@ -883,17 +908,12 @@ pub const AbiAlignmentAdvancedStrat = union(enum) { pub fn abiAlignmentAdvanced( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, -) Module.CompileError!AbiAlignmentAdvanced { + strat: ResolveStratLazy, +) SemaError!AbiAlignmentAdvanced { const target = mod.getTarget(); const use_llvm = mod.comp.config.use_llvm; const ip = &mod.intern_pool; - const opt_sema = switch (strat) { - .sema => |sema| sema, - else => null, - }; - switch (ty.toIntern()) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, else => switch (ip.indexToKey(ty.toIntern())) { @@ -911,7 +931,7 @@ pub fn abiAlignmentAdvanced( if (vector_type.len == 0) return .{ .scalar = .@"1" }; switch (mod.comp.getZigBackend()) { else => { - const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema)); + const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, .sema)); if (elem_bits == 0) return .{ .scalar = .@"1" }; const bytes = ((elem_bits * vector_type.len) + 7) / 8; const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); @@ -1024,7 +1044,7 @@ pub fn abiAlignmentAdvanced( const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.layout == .@"packed") { switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), + .sema => try ty.resolveLayout(mod), .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .ty = .comptime_int_type, @@ -1036,19 +1056,16 @@ pub fn abiAlignmentAdvanced( return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; } - const flags = struct_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - - return switch (strat) { + if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) { .eager => unreachable, // struct alignment not resolved - .sema => |sema| .{ - .scalar = try sema.resolveStructAlignment(ty.toIntern(), struct_type), - }, - .lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .sema => try ty.resolveStructAlignment(mod), + .lazy => return .{ .val = Value.fromInterned(try mod.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, + } })) }, }; + + return .{ .scalar = struct_type.flagsPtr(ip).alignment }; }, .anon_struct_type => |tuple| { var big_align: Alignment = .@"1"; @@ -1070,12 +1087,10 @@ pub fn abiAlignmentAdvanced( }, .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); - const flags = union_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - if (!union_type.haveLayout(ip)) switch (strat) { + if (union_type.flagsPtr(ip).alignment == .none) switch (strat) { .eager => unreachable, // union layout not resolved - .sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) }, + .sema => try ty.resolveUnionAlignment(mod), .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, @@ -1117,9 +1132,9 @@ pub fn abiAlignmentAdvanced( fn abiAlignmentAdvancedErrorUnion( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, + strat: ResolveStratLazy, payload_ty: Type, -) Module.CompileError!AbiAlignmentAdvanced { +) SemaError!AbiAlignmentAdvanced { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. 
const code_align = abiAlignment(Type.anyerror, mod); @@ -1154,8 +1169,8 @@ fn abiAlignmentAdvancedErrorUnion( fn abiAlignmentAdvancedOptional( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, -) Module.CompileError!AbiAlignmentAdvanced { + strat: ResolveStratLazy, +) SemaError!AbiAlignmentAdvanced { const target = mod.getTarget(); const child_type = ty.optionalChild(mod); @@ -1217,8 +1232,8 @@ const AbiSizeAdvanced = union(enum) { pub fn abiSizeAdvanced( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, -) Module.CompileError!AbiSizeAdvanced { + strat: ResolveStratLazy, +) SemaError!AbiSizeAdvanced { const target = mod.getTarget(); const use_llvm = mod.comp.config.use_llvm; const ip = &mod.intern_pool; @@ -1252,9 +1267,9 @@ pub fn abiSizeAdvanced( } }, .vector_type => |vector_type| { - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, + const sub_strat: ResolveStrat = switch (strat) { + .sema => .sema, + .eager => .normal, .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, @@ -1269,7 +1284,7 @@ pub fn abiSizeAdvanced( }; const total_bytes = switch (mod.comp.getZigBackend()) { else => total_bytes: { - const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema); + const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, sub_strat); const total_bits = elem_bits * vector_type.len; break :total_bytes (total_bits + 7) / 8; }, @@ -1403,7 +1418,7 @@ pub fn abiSizeAdvanced( .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), + .sema => try ty.resolveLayout(mod), .lazy => switch (struct_type.layout) { .@"packed" => { if (struct_type.backingIntType(ip).* == .none) return .{ @@ -1436,7 +1451,7 @@ pub fn abiSizeAdvanced( }, .anon_struct_type => |tuple| { switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), + .sema => try ty.resolveLayout(mod), .lazy, .eager => {}, } const field_count = tuple.types.len; @@ -1449,7 +1464,7 @@ pub fn abiSizeAdvanced( .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), + .sema => try ty.resolveLayout(mod), .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ .ty = .comptime_int_type, @@ -1493,8 +1508,8 @@ pub fn abiSizeAdvanced( fn abiSizeAdvancedOptional( ty: Type, mod: *Module, - strat: AbiAlignmentAdvancedStrat, -) Module.CompileError!AbiSizeAdvanced { + strat: ResolveStratLazy, +) SemaError!AbiSizeAdvanced { const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn(mod)) { @@ -1661,21 +1676,18 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 { } pub fn bitSize(ty: Type, mod: *Module) u64 { - return bitSizeAdvanced(ty, mod, null) catch unreachable; + return bitSizeAdvanced(ty, mod, .normal) catch unreachable; } -/// If you pass `opt_sema`, any recursive type resolutions will happen if -/// necessary, possibly returning a CompileError. Passing `null` instead asserts -/// the type is fully resolved, and there will be no error, guaranteed. 
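All of the `.lazy` branches above share one shape: if the layout is not known yet, hand back an interned placeholder (`lazy_align`/`lazy_size`) instead of a number, to be resolved later via `Value.resolveLazy`. A stripped-down model of that control flow, with a plain tagged union standing in for `AbiSizeAdvanced` and a `?u64` standing in for "is the layout resolved" (all names here are invented for illustration):

    const std = @import("std");

    const Strat = enum { lazy, eager, sema };

    const SizeResult = union(enum) {
        scalar: u64,
        /// Stand-in for an interned `lazy_size` value, resolved later.
        lazy_placeholder,
    };

    fn abiSizeModel(resolved: ?u64, strat: Strat) SizeResult {
        if (resolved) |size| return .{ .scalar = size };
        return switch (strat) {
            .eager => unreachable, // caller promised layout was already resolved
            .lazy => .lazy_placeholder,
            // The compiler resolves the layout here and recomputes; this
            // sketch just pretends resolution produced an 8-byte size.
            .sema => .{ .scalar = 8 },
        };
    }

    test "unresolved layout yields a placeholder under .lazy" {
        try std.testing.expectEqual(@as(u64, 4), abiSizeModel(4, .eager).scalar);
        try std.testing.expect(abiSizeModel(null, .lazy) == .lazy_placeholder);
    }

The placeholder is what makes `.lazy` cheap: no resolution work happens on that path at all.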
pub fn bitSizeAdvanced( ty: Type, mod: *Module, - opt_sema: ?*Sema, -) Module.CompileError!u64 { + strat: ResolveStrat, +) SemaError!u64 { const target = mod.getTarget(); const ip = &mod.intern_pool; - const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; + const strat_lazy: ResolveStratLazy = strat.toLazy(); switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| return int_type.bits, @@ -1690,22 +1702,22 @@ pub fn bitSizeAdvanced( if (len == 0) return 0; const elem_ty = Type.fromInterned(array_type.child); const elem_size = @max( - (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0, - (try elem_ty.abiSizeAdvanced(mod, strat)).scalar, + (try elem_ty.abiAlignmentAdvanced(mod, strat_lazy)).scalar.toByteUnits() orelse 0, + (try elem_ty.abiSizeAdvanced(mod, strat_lazy)).scalar, ); if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, strat); return (len - 1) * 8 * elem_size + elem_bit_size; }, .vector_type => |vector_type| { const child_ty = Type.fromInterned(vector_type.child); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, strat); return elem_bit_size * vector_type.len; }, .opt_type => { // Optionals and error unions are not packed so their bitsize // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8; }, .error_set_type, .inferred_error_set_type => return mod.errorSetBits(), @@ -1713,7 +1725,7 @@ pub fn bitSizeAdvanced( .error_union_type => { // Optionals and error unions are not packed so their bitsize // includes padding bits. 
- return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8; }, .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { @@ -1770,43 +1782,43 @@ pub fn bitSizeAdvanced( .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); const is_packed = struct_type.layout == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); + if (strat == .sema) { + try ty.resolveFields(mod); + if (is_packed) try ty.resolveLayout(mod); } if (is_packed) { - return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema); + return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, strat); } - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; }, .anon_struct_type => { - if (opt_sema) |sema| try sema.resolveTypeFields(ty); - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + if (strat == .sema) try ty.resolveFields(mod); + return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; }, .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); const is_packed = ty.containerLayout(mod) == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); + if (strat == .sema) { + try ty.resolveFields(mod); + if (is_packed) try ty.resolveLayout(mod); } if (!is_packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; } assert(union_type.flagsPtr(ip).status.haveFieldTypes()); var size: u64 = 0; for (0..union_type.field_types.len) |field_index| { const field_ty = union_type.field_types.get(ip)[field_index]; - size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema)); + size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, strat)); } return size; }, .opaque_type => unreachable, - .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema), + .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, strat), // values, not types .undef, @@ -2722,13 +2734,12 @@ pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { /// During semantic analysis, instead call `Sema.typeRequiresComptime` which /// resolves field types rather than asserting they are already resolved. pub fn comptimeOnly(ty: Type, mod: *Module) bool { - return ty.comptimeOnlyAdvanced(mod, null) catch unreachable; + return ty.comptimeOnlyAdvanced(mod, .normal) catch unreachable; } /// `generic_poison` will return false. /// May return false negatives when structs and unions are having their field types resolved. -/// If `opt_sema` is not provided, asserts that the type is sufficiently resolved. 
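A worked example of the array arithmetic in `bitSizeAdvanced` above: each of the first `len - 1` elements contributes its full stride in bits (the larger of its ABI size and ABI alignment in bytes, times 8), and only the final element contributes its raw bit size. Assuming a target where `u24` has a 4-byte ABI size, `[4]u24` therefore has 3 * 32 + 24 = 120 bits. The check below restates the formula via `@sizeOf`, which matches the stride for `u24` since its size and alignment agree:

    const std = @import("std");

    test "array bit size pads every element except the last" {
        // Formula from bitSizeAdvanced:
        //   (len - 1) * 8 * stride(elem) + bitSize(elem)
        const len = 4;
        const expected = (len - 1) * 8 * @sizeOf(u24) + @bitSizeOf(u24);
        try std.testing.expectEqual(expected, @bitSizeOf([len]u24));
    }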
-pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { +pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool { const ip = &mod.intern_pool; return switch (ty.toIntern()) { .empty_struct_type => false, @@ -2738,19 +2749,19 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .ptr_type => |ptr_type| { const child_ty = Type.fromInterned(ptr_type.child); switch (child_ty.zigTypeTag(mod)) { - .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema), + .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, strat), .Opaque => return false, - else => return child_ty.comptimeOnlyAdvanced(mod, opt_sema), + else => return child_ty.comptimeOnlyAdvanced(mod, strat), } }, .anyframe_type => |child| { if (child == .none) return false; - return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema); + return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat); }, - .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema), - .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema), - .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema), - .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema), + .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, strat), + .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, strat), + .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat), + .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, strat), .error_set_type, .inferred_error_set_type, @@ -2817,8 +2828,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .no, .wip => false, .yes => true, .unknown => { - // The type is not resolved; assert that we have a Sema. - const sema = opt_sema.?; + assert(strat == .sema); if (struct_type.flagsPtr(ip).field_types_wip) return false; @@ -2826,13 +2836,13 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com struct_type.flagsPtr(ip).requires_comptime = .wip; errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; - try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); + try ty.resolveFields(mod); for (0..struct_type.field_types.len) |i_usize| { const i: u32 = @intCast(i_usize); if (struct_type.fieldIsComptime(ip, i)) continue; const field_ty = struct_type.field_types.get(ip)[i]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) { // Note that this does not cause the layout to // be considered resolved. 
Comptime-only types // still maintain a layout of their @@ -2851,7 +2861,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .anon_struct_type => |tuple| { for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { const have_comptime_val = val != .none; - if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true; + if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) return true; } return false; }, @@ -2862,8 +2872,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .no, .wip => return false, .yes => return true, .unknown => { - // The type is not resolved; assert that we have a Sema. - const sema = opt_sema.?; + assert(strat == .sema); if (union_type.flagsPtr(ip).status == .field_types_wip) return false; @@ -2871,11 +2880,11 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com union_type.flagsPtr(ip).requires_comptime = .wip; errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; - try sema.resolveTypeFieldsUnion(ty, union_type); + try ty.resolveFields(mod); for (0..union_type.field_types.len) |field_idx| { const field_ty = union_type.field_types.get(ip)[field_idx]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) { union_type.flagsPtr(ip).requires_comptime = .yes; return true; } @@ -2889,7 +2898,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.Com .opaque_type => false, - .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema), + .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, strat), // values, not types .undef, @@ -3180,10 +3189,10 @@ pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { } pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { - return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable; + return ty.structFieldAlignAdvanced(index, zcu, .normal) catch unreachable; } -pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment { +pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, strat: ResolveStrat) !Alignment { const ip = &zcu.intern_pool; switch (ip.indexToKey(ty.toIntern())) { .struct_type => { @@ -3191,22 +3200,14 @@ pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*S assert(struct_type.layout != .@"packed"); const explicit_align = struct_type.fieldAlign(ip, index); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); - if (opt_sema) |sema| { - return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout); - } else { - return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout); - } + return zcu.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat); }, .anon_struct_type => |anon_struct| { - return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar; + return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; }, .union_type => { const union_obj = ip.loadUnionType(ty.toIntern()); - if (opt_sema) |sema| { - return sema.unionFieldAlignment(union_obj, @intCast(index)); - } else 
{ - return zcu.unionFieldNormalAlignment(union_obj, @intCast(index)); - } + return zcu.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat); }, else => unreachable, } @@ -3546,6 +3547,397 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: } }; } +pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void { + const ip = &zcu.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu), + else => {}, + } + switch (ty.zigTypeTag(zcu)) { + .Struct => switch (ip.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { + const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); + try field_ty.resolveLayout(zcu); + }, + .struct_type => return ty.resolveStructInner(zcu, .layout), + else => unreachable, + }, + .Union => return ty.resolveUnionInner(zcu, .layout), + .Array => { + if (ty.arrayLenIncludingSentinel(zcu) == 0) return; + const elem_ty = ty.childType(zcu); + return elem_ty.resolveLayout(zcu); + }, + .Optional => { + const payload_ty = ty.optionalChild(zcu); + return payload_ty.resolveLayout(zcu); + }, + .ErrorUnion => { + const payload_ty = ty.errorUnionPayload(zcu); + return payload_ty.resolveLayout(zcu); + }, + .Fn => { + const info = zcu.typeToFunc(ty).?; + if (info.is_generic) { + // Resolving of generic function types is deferred to when + // the function is instantiated. + return; + } + for (0..info.param_types.len) |i| { + const param_ty = info.param_types.get(ip)[i]; + try Type.fromInterned(param_ty).resolveLayout(zcu); + } + try Type.fromInterned(info.return_type).resolveLayout(zcu); + }, + else => {}, + } +} + +pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void { + const ip = &zcu.intern_pool; + const ty_ip = ty.toIntern(); + + switch (ty_ip) { + .none => unreachable, + + .u0_type, + .i0_type, + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .anyopaque_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .adhoc_inferred_error_set_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .anyframe_type, + .null_type, + .undefined_type, + .enum_literal_type, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + .optional_noreturn_type, + .anyerror_void_error_union_type, + .generic_poison_type, + .empty_struct_type, + => {}, + + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .zero_u8 => unreachable, + .one => unreachable, + .one_usize => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, + .negative_one => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { + .type_struct, 
+ .type_struct_packed, + .type_struct_packed_inits, + => return ty.resolveStructInner(zcu, .fields), + + .type_union => return ty.resolveUnionInner(zcu, .fields), + + .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, zcu), + + else => {}, + }, + } +} + +pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void { + const ip = &zcu.intern_pool; + + switch (ip.indexToKey(ty.toIntern())) { + .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu), + else => {}, + } + + switch (ty.zigTypeTag(zcu)) { + .Type, + .Void, + .Bool, + .NoReturn, + .Int, + .Float, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .ErrorSet, + .Enum, + .Opaque, + .Frame, + .AnyFrame, + .Vector, + .EnumLiteral, + => {}, + + .Pointer => return ty.childType(zcu).resolveFully(zcu), + .Array => return ty.childType(zcu).resolveFully(zcu), + .Optional => return ty.optionalChild(zcu).resolveFully(zcu), + .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(zcu), + .Fn => { + const info = zcu.typeToFunc(ty).?; + if (info.is_generic) return; + for (0..info.param_types.len) |i| { + const param_ty = info.param_types.get(ip)[i]; + try Type.fromInterned(param_ty).resolveFully(zcu); + } + try Type.fromInterned(info.return_type).resolveFully(zcu); + }, + + .Struct => switch (ip.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { + const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); + try field_ty.resolveFully(zcu); + }, + .struct_type => return ty.resolveStructInner(zcu, .full), + else => unreachable, + }, + .Union => return ty.resolveUnionInner(zcu, .full), + } +} + +pub fn resolveStructFieldInits(ty: Type, zcu: *Zcu) SemaError!void { + // TODO: stop calling this for tuples! + _ = zcu.typeToStruct(ty) orelse return; + return ty.resolveStructInner(zcu, .inits); +} + +pub fn resolveStructAlignment(ty: Type, zcu: *Zcu) SemaError!void { + return ty.resolveStructInner(zcu, .alignment); +} + +pub fn resolveUnionAlignment(ty: Type, zcu: *Zcu) SemaError!void { + return ty.resolveUnionInner(zcu, .alignment); +} + +/// `ty` must be a struct. +fn resolveStructInner( + ty: Type, + zcu: *Zcu, + resolution: enum { fields, inits, alignment, layout, full }, +) SemaError!void { + const gpa = zcu.gpa; + + const struct_obj = zcu.typeToStruct(ty).?; + const owner_decl_index = struct_obj.decl.unwrap() orelse return; + + var analysis_arena = std.heap.ArenaAllocator.init(gpa); + defer analysis_arena.deinit(); + + var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); + defer comptime_err_ret_trace.deinit(); + + var sema: Sema = .{ + .mod = zcu, + .gpa = gpa, + .arena = analysis_arena.allocator(), + .code = undefined, // This ZIR will not be used. + .owner_decl = zcu.declPtr(owner_decl_index), + .owner_decl_index = owner_decl_index, + .func_index = .none, + .func_is_naked = false, + .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, + .owner_func_index = .none, + .comptime_err_ret_trace = &comptime_err_ret_trace, + }; + defer sema.deinit(); + + switch (resolution) { + .fields => return sema.resolveTypeFieldsStruct(ty.toIntern(), struct_obj), + .inits => return sema.resolveStructFieldInits(ty), + .alignment => return sema.resolveStructAlignment(ty.toIntern(), struct_obj), + .layout => return sema.resolveStructLayout(ty), + .full => return sema.resolveStructFully(ty), + } +} + +/// `ty` must be a union. 
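`resolveStructInner` above and `resolveUnionInner` below share a pattern worth calling out: each resolution request spins up a throwaway `Sema` backed by its own arena, runs exactly one job, and tears the whole context down again. A stripped-down model of that "ephemeral context per job" shape (the `Ctx`/`runOneJob` names are invented; the real `Sema` carries many more fields):

    const std = @import("std");

    const Ctx = struct {
        arena: std.mem.Allocator,

        fn runOneJob(ctx: Ctx) !usize {
            // Scratch allocations live only as long as this one job.
            const scratch = try ctx.arena.alloc(u8, 16);
            return scratch.len;
        }
    };

    test "ephemeral context per resolution job" {
        var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena_state.deinit(); // frees everything the job allocated

        const ctx: Ctx = .{ .arena = arena_state.allocator() };
        try std.testing.expectEqual(@as(usize, 16), try ctx.runOneJob());
    }

Because the ZIR is never consulted on these paths, `.code = undefined` is safe in the real helpers, and the arena bounds the lifetime of any scratch state the resolver needs.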
+fn resolveUnionInner(
+    ty: Type,
+    zcu: *Zcu,
+    resolution: enum { fields, alignment, layout, full },
+) SemaError!void {
+    const gpa = zcu.gpa;
+
+    const union_obj = zcu.typeToUnion(ty).?;
+    const owner_decl_index = union_obj.decl;
+
+    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
+    defer analysis_arena.deinit();
+
+    var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+    defer comptime_err_ret_trace.deinit();
+
+    var sema: Sema = .{
+        .mod = zcu,
+        .gpa = gpa,
+        .arena = analysis_arena.allocator(),
+        .code = undefined, // This ZIR will not be used.
+        .owner_decl = zcu.declPtr(owner_decl_index),
+        .owner_decl_index = owner_decl_index,
+        .func_index = .none,
+        .func_is_naked = false,
+        .fn_ret_ty = Type.void,
+        .fn_ret_ty_ies = null,
+        .owner_func_index = .none,
+        .comptime_err_ret_trace = &comptime_err_ret_trace,
+    };
+    defer sema.deinit();
+
+    switch (resolution) {
+        .fields => return sema.resolveTypeFieldsUnion(ty, union_obj),
+        .alignment => return sema.resolveUnionAlignment(ty, union_obj),
+        .layout => return sema.resolveUnionLayout(ty),
+        .full => return sema.resolveUnionFully(ty),
+    }
+}
+
+/// Fully resolves a simple type. This is usually a nop, but for builtin types with
+/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
+/// resolve the type.
+fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Error!void {
+    const builtin_type_name: []const u8 = switch (simple_type) {
+        .atomic_order => "AtomicOrder",
+        .atomic_rmw_op => "AtomicRmwOp",
+        .calling_convention => "CallingConvention",
+        .address_space => "AddressSpace",
+        .float_mode => "FloatMode",
+        .reduce_op => "ReduceOp",
+        .call_modifier => "CallModifier",
+        .prefetch_options => "PrefetchOptions",
+        .export_options => "ExportOptions",
+        .extern_options => "ExternOptions",
+        .type_info => "Type",
+        else => return,
+    };
+    // This will fully resolve the type.
+    _ = try zcu.getBuiltinType(builtin_type_name);
+}
+
+/// Returns the type of a pointer to an element.
+/// Asserts that the type is a pointer, and that the element type is indexable.
+/// If the element index is comptime-known, it must be passed in `offset`.
+/// For *@Vector(n, T), return *align(a:b:h:v) T
+/// For *[N]T, return *T
+/// For [*]T, returns *T
+/// For []T, returns *T
+/// Handles const-ness and address spaces in particular.
+/// This code is duplicated in `Sema.analyzePtrArithmetic`.
+/// May perform type resolution and return a transitive `error.AnalysisFail`.
+pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
+    const ptr_info = ptr_ty.ptrInfo(zcu);
+    const elem_ty = ptr_ty.elemType2(zcu);
+    const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
+    const parent_ty = ptr_ty.childType(zcu);
+
+    const VI = InternPool.Key.PtrType.VectorIndex;
+
+    const vector_info: struct {
+        host_size: u16 = 0,
+        alignment: Alignment = .none,
+        vector_index: VI = .none,
+    } = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .One) blk: {
+        const elem_bits = elem_ty.bitSize(zcu);
+        if (elem_bits == 0) break :blk .{};
+        const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
+        if (!is_packed) break :blk .{};
+
+        break :blk .{
+            .host_size = @intCast(parent_ty.arrayLen(zcu)),
+            .alignment = parent_ty.abiAlignment(zcu),
+            .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
+        };
+    } else .{};
+
+    const alignment: Alignment = a: {
+        // Calculate the new pointer alignment.
+ if (ptr_info.flags.alignment == .none) { + // In case of an ABI-aligned pointer, any pointer arithmetic + // maintains the same ABI-alignedness. + break :a vector_info.alignment; + } + // If the addend is not a comptime-known value we can still count on + // it being a multiple of the type size. + const elem_size = (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar; + const addend = if (offset) |off| elem_size * off else elem_size; + + // The resulting pointer is aligned to the lcd between the offset (an + // arbitrary number) and the alignment factor (always a power of two, + // non zero). + const new_align: Alignment = @enumFromInt(@min( + @ctz(addend), + ptr_info.flags.alignment.toLog2Units(), + )); + assert(new_align != .none); + break :a new_align; + }; + return zcu.ptrTypeSema(.{ + .child = elem_ty.toIntern(), + .flags = .{ + .alignment = alignment, + .is_const = ptr_info.flags.is_const, + .is_volatile = ptr_info.flags.is_volatile, + .is_allowzero = is_allowzero, + .address_space = ptr_info.flags.address_space, + .vector_index = vector_info.vector_index, + }, + .packed_offset = .{ + .host_size = vector_info.host_size, + .bit_offset = 0, + }, + }); +} + pub const @"u1": Type = .{ .ip_index = .u1_type }; pub const @"u8": Type = .{ .ip_index = .u8_type }; pub const @"u16": Type = .{ .ip_index = .u16_type }; diff --git a/src/Value.zig b/src/Value.zig index 20b24510ef..34a0472c16 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -161,9 +161,11 @@ pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { }; } +pub const ResolveStrat = Type.ResolveStrat; + /// Asserts the value is an integer. pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { - return val.toBigIntAdvanced(space, mod, null) catch unreachable; + return val.toBigIntAdvanced(space, mod, .normal) catch unreachable; } /// Asserts the value is an integer. @@ -171,7 +173,7 @@ pub fn toBigIntAdvanced( val: Value, space: *BigIntSpace, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) Module.CompileError!BigIntConst { return switch (val.toIntern()) { .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), @@ -181,7 +183,7 @@ pub fn toBigIntAdvanced( .int => |int| switch (int.storage) { .u64, .i64, .big_int => int.storage.toBigInt(space), .lazy_align, .lazy_size => |ty| { - if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty)); + if (strat == .sema) try Type.fromInterned(ty).resolveLayout(mod); const x = switch (int.storage) { else => unreachable, .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, @@ -190,10 +192,10 @@ pub fn toBigIntAdvanced( return BigIntMutable.init(&space.limbs, x).toConst(); }, }, - .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, opt_sema), + .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, strat), .opt, .ptr => BigIntMutable.init( &space.limbs, - (try val.getUnsignedIntAdvanced(mod, opt_sema)).?, + (try val.getUnsignedIntAdvanced(mod, strat)).?, ).toConst(), else => unreachable, }, @@ -228,12 +230,12 @@ pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { - return getUnsignedIntAdvanced(val, mod, null) catch unreachable; + return getUnsignedIntAdvanced(val, mod, .normal) catch unreachable; } /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
-pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { +pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u64 { return switch (val.toIntern()) { .undef => unreachable, .bool_false => 0, @@ -244,28 +246,22 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, .i64 => |x| std.math.cast(u64, x), - .lazy_align => |ty| if (opt_sema) |sema| - (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0 - else - Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, - .lazy_size => |ty| if (opt_sema) |sema| - (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar - else - Type.fromInterned(ty).abiSize(mod), + .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, + .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, }, .ptr => |ptr| switch (ptr.base_addr) { .int => ptr.byte_offset, .field => |field| { - const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, strat)) orelse return null; const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod); - if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); + if (strat == .sema) try struct_ty.resolveLayout(mod); return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod) + ptr.byte_offset; }, else => null, }, .opt => |opt| switch (opt.val) { .none => 0, - else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, opt_sema), + else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, strat), }, else => null, }, @@ -273,13 +269,13 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 } /// Asserts the value is an integer and it fits in a u64 -pub fn toUnsignedInt(val: Value, mod: *Module) u64 { - return getUnsignedInt(val, mod).?; +pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 { + return getUnsignedInt(val, zcu).?; } /// Asserts the value is an integer and it fits in a u64 -pub fn toUnsignedIntAdvanced(val: Value, sema: *Sema) !u64 { - return (try getUnsignedIntAdvanced(val, sema.mod, sema)).?; +pub fn toUnsignedIntSema(val: Value, zcu: *Zcu) !u64 { + return (try getUnsignedIntAdvanced(val, zcu, .sema)).?; } /// Asserts the value is an integer and it fits in a i64 @@ -1028,13 +1024,13 @@ pub fn floatHasFraction(self: Value, mod: *const Module) bool { } pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order { - return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable; + return orderAgainstZeroAdvanced(lhs, mod, .normal) catch unreachable; } pub fn orderAgainstZeroAdvanced( lhs: Value, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) Module.CompileError!std.math.Order { return switch (lhs.toIntern()) { .bool_false => .eq, @@ -1052,13 +1048,13 @@ pub fn orderAgainstZeroAdvanced( .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced( mod, false, - if (opt_sema) |sema| .{ .sema = sema } else .eager, + strat.toLazy(), ) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }) .gt else .eq, }, - .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, opt_sema), + .enum_tag => |enum_tag| 
Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, strat), .float => |float| switch (float.storage) { inline else => |x| std.math.order(x, 0), }, @@ -1069,14 +1065,13 @@ pub fn orderAgainstZeroAdvanced( /// Asserts the value is comparable. pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order { - return orderAdvanced(lhs, rhs, mod, null) catch unreachable; + return orderAdvanced(lhs, rhs, mod, .normal) catch unreachable; } /// Asserts the value is comparable. -/// If opt_sema is null then this function asserts things are resolved and cannot fail. -pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order { - const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); - const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema); +pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, strat: ResolveStrat) !std.math.Order { + const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, strat); + const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, strat); switch (lhs_against_zero) { .lt => if (rhs_against_zero != .lt) return .lt, .eq => return rhs_against_zero.invert(), @@ -1096,15 +1091,15 @@ pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !st var lhs_bigint_space: BigIntSpace = undefined; var rhs_bigint_space: BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, strat); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, strat); return lhs_bigint.order(rhs_bigint); } /// Asserts the value is comparable. Does not take a type parameter because it supports /// comparisons between heterogeneous types. pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool { - return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable; + return compareHeteroAdvanced(lhs, op, rhs, mod, .normal) catch unreachable; } pub fn compareHeteroAdvanced( @@ -1112,7 +1107,7 @@ pub fn compareHeteroAdvanced( op: std.math.CompareOperator, rhs: Value, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) !bool { if (lhs.pointerDecl(mod)) |lhs_decl| { if (rhs.pointerDecl(mod)) |rhs_decl| { @@ -1135,7 +1130,7 @@ pub fn compareHeteroAdvanced( else => {}, } } - return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op); + return (try orderAdvanced(lhs, rhs, mod, strat)).compare(op); } /// Asserts the values are comparable. Both operands have type `ty`. @@ -1176,22 +1171,22 @@ pub fn compareScalar( /// /// Note that `!compareAllWithZero(.eq, ...) 
!= compareAllWithZero(.neq, ...)` pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool { - return compareAllWithZeroAdvancedExtra(lhs, op, mod, null) catch unreachable; + return compareAllWithZeroAdvancedExtra(lhs, op, mod, .normal) catch unreachable; } -pub fn compareAllWithZeroAdvanced( +pub fn compareAllWithZeroSema( lhs: Value, op: std.math.CompareOperator, - sema: *Sema, + zcu: *Zcu, ) Module.CompileError!bool { - return compareAllWithZeroAdvancedExtra(lhs, op, sema.mod, sema); + return compareAllWithZeroAdvancedExtra(lhs, op, zcu, .sema); } pub fn compareAllWithZeroAdvancedExtra( lhs: Value, op: std.math.CompareOperator, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) Module.CompileError!bool { if (lhs.isInf(mod)) { switch (op) { @@ -1211,14 +1206,14 @@ pub fn compareAllWithZeroAdvancedExtra( if (!std.math.order(byte, 0).compare(op)) break false; } else true, .elems => |elems| for (elems) |elem| { - if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat)) break false; } else true, - .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema), + .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat), }, .undef => return false, else => {}, } - return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); + return (try orderAgainstZeroAdvanced(lhs, mod, strat)).compare(op); } pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool { @@ -1279,9 +1274,9 @@ pub fn slicePtr(val: Value, mod: *Module) Value { } /// Gets the `len` field of a slice value as a `u64`. -/// Resolves the length using the provided `Sema` if necessary. -pub fn sliceLen(val: Value, sema: *Sema) !u64 { - return Value.fromInterned(sema.mod.intern_pool.sliceLen(val.toIntern())).toUnsignedIntAdvanced(sema); +/// Resolves the length using `Sema` if necessary. +pub fn sliceLen(val: Value, zcu: *Zcu) !u64 { + return Value.fromInterned(zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(zcu); } /// Asserts the value is an aggregate, and returns the element value at the given index. @@ -1482,29 +1477,29 @@ pub fn isFloat(self: Value, mod: *const Module) bool { } pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value { - return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) { + return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, .normal) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => unreachable, }; } -pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { +pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).toIntern(); + scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, strat)).toIntern(); } return Value.fromInterned((try mod.intern(.{ .aggregate = .{ .ty = float_ty.toIntern(), .storage = .{ .elems = result_data }, } }))); } - return floatFromIntScalar(val, float_ty, mod, opt_sema); + return floatFromIntScalar(val, float_ty, mod, strat); } -pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { +pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => try mod.undefValue(float_ty), .int => |int| switch (int.storage) { @@ -1513,16 +1508,8 @@ pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?* return mod.floatValue(float_ty, float); }, inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod), - .lazy_align => |ty| if (opt_sema) |sema| { - return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0, float_ty, mod); - } else { - return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, float_ty, mod); - }, - .lazy_size => |ty| if (opt_sema) |sema| { - return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return floatFromIntInner(Type.fromInterned(ty).abiSize(mod), float_ty, mod); - }, + .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, mod), + .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, float_ty, mod), }, else => unreachable, }; @@ -3616,17 +3603,15 @@ pub const RuntimeIndex = InternPool.RuntimeIndex; /// `parent_ptr` must be a single-pointer to some optional. /// Returns a pointer to the payload of the optional. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. +pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const opt_ty = parent_ptr_ty.childType(zcu); assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(opt_ty.zigTypeTag(zcu) == .Optional); - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_ty.ptrInfo(zcu); // We can correctly preserve alignment `.none`, since an optional has the same // natural alignment as its child type. @@ -3651,17 +3636,15 @@ pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value { /// `parent_ptr` must be a single-pointer to some error union. /// Returns a pointer to the payload of the error union. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. 
+pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const eu_ty = parent_ptr_ty.childType(zcu); assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion); - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_ty.ptrInfo(zcu); // We can correctly preserve alignment `.none`, since an error union has a // natural alignment greater than or equal to that of its payload type. @@ -3682,10 +3665,8 @@ pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value { /// `parent_ptr` must be a single-pointer to a struct, union, or slice. /// Returns a pointer to the aggregate field at the specified index. /// For slices, uses `slice_ptr_index` and `slice_len_index`. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. +pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const aggregate_ty = parent_ptr_ty.childType(zcu); @@ -3698,17 +3679,17 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { .Struct => field: { const field_ty = aggregate_ty.structFieldType(field_idx, zcu); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) }, + .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) }, .@"extern" => { // Well-defined layout, so just offset the pointer appropriately. const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu); const field_align = a: { const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: { - break :pa try sema.typeAbiAlignment(aggregate_ty); + break :pa (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; } else parent_ptr_info.flags.alignment; break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off))); }; - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = field_align; @@ -3723,14 +3704,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { new.packed_offset = packed_offset; new.child = field_ty.toIntern(); if (new.flags.alignment == .none) { - new.flags.alignment = try sema.typeAbiAlignment(aggregate_ty); + new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; } break :info new; }); return zcu.getCoerced(parent_ptr, result_ty); }, .byte_ptr => |ptr_info| { - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.packed_offset = .{ @@ -3749,10 +3730,10 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { const union_obj = zcu.typeToUnion(aggregate_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) }, + .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) }, .@"extern" => { // Point to the same address. 
- const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); break :info new; @@ -3762,28 +3743,28 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { .@"packed" => { // If the field has an ABI size matching its bit size, then we can continue to use a // non-bit pointer if the parent pointer is also a non-bit pointer. - if (parent_ptr_info.packed_offset.host_size == 0 and try sema.typeAbiSize(field_ty) * 8 == try field_ty.bitSizeAdvanced(zcu, sema)) { + if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(zcu, .sema)) { // We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely. const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) { .little => 0, - .big => try sema.typeAbiSize(aggregate_ty) - try sema.typeAbiSize(field_ty), + .big => (try aggregate_ty.abiSizeAdvanced(zcu, .sema)).scalar - (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar, }; - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = InternPool.Alignment.fromLog2Units( - @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema)).toByteUnits().?), + @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema)).toByteUnits().?), ); break :info new; }); return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu); } else { // The result must be a bit-pointer if it is not already. - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); if (new.packed_offset.host_size == 0) { - new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, sema)) + 7) / 8); + new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, .sema)) + 7) / 8); assert(new.packed_offset.bit_offset == 0); } break :info new; @@ -3805,14 +3786,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { }; const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: { - const ty_align = try sema.typeAbiAlignment(field_ty); + const ty_align = (try field_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; const true_field_align = if (field_align == .none) ty_align else field_align; const new_align = true_field_align.min(parent_ptr_info.flags.alignment); if (new_align == ty_align) break :a .none; break :a new_align; } else field_align; - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = new_align; @@ -3834,10 +3815,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { /// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer or C-pointer or slice. /// Returns a pointer to the element at the specified index. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. 
+pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value { const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) { .One, .Many, .C => orig_parent_ptr, .Slice => orig_parent_ptr.slicePtr(zcu), @@ -3845,7 +3824,7 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const elem_ty = parent_ptr_ty.childType(zcu); - const result_ty = try sema.elemPtrType(parent_ptr_ty, @intCast(field_idx)); + const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), zcu); if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty); @@ -3862,21 +3841,21 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) { .One => switch (elem_ty.zigTypeTag(zcu)) { - .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, sema), 8) }, + .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, .sema), 8) }, .Array => strat: { const arr_elem_ty = elem_ty.childType(zcu); - if (try sema.typeRequiresComptime(arr_elem_ty)) { + if (try arr_elem_ty.comptimeOnlyAdvanced(zcu, .sema)) { break :strat .{ .elem_ptr = arr_elem_ty }; } - break :strat .{ .offset = field_idx * try sema.typeAbiSize(arr_elem_ty) }; + break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(zcu, .sema)).scalar }; }, else => unreachable, }, - .Many, .C => if (try sema.typeRequiresComptime(elem_ty)) + .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(zcu, .sema)) .{ .elem_ptr = elem_ty } else - .{ .offset = field_idx * try sema.typeAbiSize(elem_ty) }, + .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar }, .Slice => unreachable, }; @@ -4014,11 +3993,7 @@ pub const PointerDeriveStep = union(enum) { pub fn pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.Error!PointerDeriveStep { return ptr_val.pointerDerivationAdvanced(arena, zcu, null) catch |err| switch (err) { error.OutOfMemory => |e| return e, - error.AnalysisFail, - error.GenericPoison, - error.ComptimeReturn, - error.ComptimeBreak, - => unreachable, + error.AnalysisFail => unreachable, }; } @@ -4087,8 +4062,8 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op const base_ptr_ty = base_ptr.typeOf(zcu); const agg_ty = base_ptr_ty.childType(zcu); const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) { - .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) }, - .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) }, + .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) }, + .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) }, .Pointer => .{ switch (field.index) { Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu), Value.slice_len_index => Type.usize, @@ -4269,3 +4244,118 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op .new_ptr_ty = Type.fromInterned(ptr.ty), } }; } + +pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value { + switch (zcu.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, 
.i64, .big_int => return val, + .lazy_align, .lazy_size => return zcu.intValue( + Type.fromInterned(int.ty), + (try val.getUnsignedIntAdvanced(zcu, .sema)).?, + ), + }, + .slice => |slice| { + const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, zcu); + const len = try Value.fromInterned(slice.len).resolveLazy(arena, zcu); + if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val; + return Value.fromInterned(try zcu.intern(.{ .slice = .{ + .ty = slice.ty, + .ptr = ptr.toIntern(), + .len = len.toIntern(), + } })); + }, + .ptr => |ptr| { + switch (ptr.base_addr) { + .decl, .comptime_alloc, .anon_decl, .int => return val, + .comptime_field => |field_val| { + const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_field_val == field_val) + val + else + Value.fromInterned((try zcu.intern(.{ .ptr = .{ + .ty = ptr.ty, + .base_addr = .{ .comptime_field = resolved_field_val }, + .byte_offset = ptr.byte_offset, + } }))); + }, + .eu_payload, .opt_payload => |base| { + const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_base == base) + val + else + Value.fromInterned((try zcu.intern(.{ .ptr = .{ + .ty = ptr.ty, + .base_addr = switch (ptr.base_addr) { + .eu_payload => .{ .eu_payload = resolved_base }, + .opt_payload => .{ .opt_payload = resolved_base }, + else => unreachable, + }, + .byte_offset = ptr.byte_offset, + } }))); + }, + .arr_elem, .field => |base_index| { + const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_base == base_index.base) + val + else + Value.fromInterned((try zcu.intern(.{ .ptr = .{ + .ty = ptr.ty, + .base_addr = switch (ptr.base_addr) { + .arr_elem => .{ .arr_elem = .{ + .base = resolved_base, + .index = base_index.index, + } }, + .field => .{ .field = .{ + .base = resolved_base, + .index = base_index.index, + } }, + else => unreachable, + }, + .byte_offset = ptr.byte_offset, + } }))); + }, + } + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => return val, + .elems => |elems| { + var resolved_elems: []InternPool.Index = &.{}; + for (elems, 0..) 
|elem, i| { + const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern(); + if (resolved_elems.len == 0 and resolved_elem != elem) { + resolved_elems = try arena.alloc(InternPool.Index, elems.len); + @memcpy(resolved_elems[0..i], elems[0..i]); + } + if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; + } + return if (resolved_elems.len == 0) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .elems = resolved_elems }, + } }))); + }, + .repeated_elem => |elem| { + const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_elem == elem) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .repeated_elem = resolved_elem }, + } }))); + }, + }, + .un => |un| { + const resolved_tag = if (un.tag == .none) + .none + else + (try Value.fromInterned(un.tag).resolveLazy(arena, zcu)).toIntern(); + const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_tag == un.tag and resolved_val == un.val) + val + else + Value.fromInterned((try zcu.intern(.{ .un = .{ + .ty = un.ty, + .tag = resolved_tag, + .val = resolved_val, + } }))); + }, + else => return val, + } +} diff --git a/src/Zcu.zig b/src/Zcu.zig index 27e9347268..e3b85e957d 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -3593,7 +3593,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In }, error.OutOfMemory => return error.OutOfMemory, }; - defer air.deinit(gpa); + errdefer air.deinit(gpa); const invalidate_ies_deps = i: { if (!was_outdated) break :i false; @@ -3615,13 +3615,36 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null); if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) { + air.deinit(gpa); return; } + try comp.work_queue.writeItem(.{ .codegen_func = .{ + .func = func_index, + .air = air, + } }); +} + +/// Takes ownership of `air`, even on error. +/// If any types referenced by `air` are unresolved, marks the codegen as failed. 
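+/// The `defer` at the top of the body below upholds that ownership contract: `air` is
+/// deinitialized on every path out of this function.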
+pub fn linkerUpdateFunc(zcu: *Zcu, func_index: InternPool.Index, air: Air) Allocator.Error!void { + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const comp = zcu.comp; + + defer { + var air_mut = air; + air_mut.deinit(gpa); + } + + const func = zcu.funcInfo(func_index); + const decl_index = func.owner_decl; + const decl = zcu.declPtr(decl_index); + var liveness = try Liveness.analyze(gpa, air, ip); defer liveness.deinit(gpa); - if (dump_air) { + if (build_options.enable_debug_extensions and comp.verbose_air) { const fqn = try decl.fullyQualifiedName(zcu); std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); @import("print_air.zig").dump(zcu, air, liveness); @@ -3629,7 +3652,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In } if (std.debug.runtime_safety) { - var verify = Liveness.Verify{ + var verify: Liveness.Verify = .{ .gpa = gpa, .air = air, .liveness = liveness, @@ -3642,7 +3665,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In else => { try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); zcu.failed_analysis.putAssumeCapacityNoClobber( - AnalUnit.wrap(.{ .decl = decl_index }), + AnalUnit.wrap(.{ .func = func_index }), try Module.ErrorMsg.create( gpa, decl.navSrcLoc(zcu), @@ -3659,7 +3682,13 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); defer codegen_prog_node.end(); - if (comp.bin_file) |lf| { + if (!air.typesFullyResolved(zcu)) { + // A type we depend on failed to resolve. This is a transitive failure. + // Correcting this failure will involve changing a type this function + // depends on, hence triggering re-analysis of this function, so this + // interacts correctly with incremental compilation. 
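+ // Only the analysis state is updated here; no new error message is recorded,
+ // since the root cause is already reported on the type that failed to resolve.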
+ func.analysis(ip).state = .codegen_failure; + } else if (comp.bin_file) |lf| { lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { @@ -3667,7 +3696,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In }, else => { try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try Module.ErrorMsg.create( + zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .func = func_index }), try Module.ErrorMsg.create( gpa, decl.navSrcLoc(zcu), "unable to codegen: {s}", @@ -3735,7 +3764,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) // Decl itself is safely analyzed, and body analysis is not yet queued - try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index }); + try mod.comp.work_queue.writeItem(.{ .analyze_func = func_index }); if (mod.emit_h != null) { // TODO: we ideally only want to do this if the function's type changed // since the last update @@ -3812,7 +3841,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa decl.analysis = .complete; try zcu.scanNamespace(namespace_index, decls, decl); - + try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return wip_ty.finish(ip, decl_index, namespace_index.toOptional()); } @@ -4103,7 +4132,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { // Note this resolves the type of the Decl, not the value; if this Decl // is a struct, for example, this resolves `type` (which needs no resolution), // not the struct itself. - try sema.resolveTypeLayout(decl_ty); + try decl_ty.resolveLayout(mod); if (decl.kind == .@"usingnamespace") { if (!decl_ty.eql(Type.type, mod)) { @@ -4220,7 +4249,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { if (has_runtime_bits) { // Needed for codegen_decl which will call updateDecl and then the // codegen backend wants full access to the Decl Type. - try sema.resolveTypeFully(decl_ty); + try decl_ty.resolveFully(mod); try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); @@ -5212,23 +5241,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato else => |e| return e, }; - // Similarly, resolve any queued up types that were requested to be resolved for - // the backends. - for (sema.types_to_resolve.keys()) |ty| { - sema.resolveTypeFully(Type.fromInterned(ty)) catch |err| switch (err) { - error.GenericPoison => unreachable, - error.ComptimeReturn => unreachable, - error.ComptimeBreak => unreachable, - error.AnalysisFail => { - // In this case our function depends on a type that had a compile error. - // We should not try to lower this function. - decl.analysis = .dependency_failure; - return error.AnalysisFail; - }, - else => |e| return e, - }; - } - try sema.flushExports(); return .{ @@ -5793,6 +5805,16 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type return Type.fromInterned((try intern(mod, .{ .ptr_type = canon_info }))); } +/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer +/// child type's alignment is resolved so that an invalid alignment is not used. +/// In general, prefer this function during semantic analysis. 
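+/// For example, a request for `*align(8) S` first resolves the ABI alignment of `S`,
+/// so that `ptrType` can canonicalize the explicit `align(8)` against the natural one.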
+pub fn ptrTypeSema(zcu: *Zcu, info: InternPool.Key.PtrType) SemaError!Type { + if (info.flags.alignment != .none) { + _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(zcu, .sema); + } + return zcu.ptrType(info); +} + pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .child = child_type.toIntern() }); } @@ -6368,15 +6390,21 @@ pub fn unionAbiAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType) return max_align; } -/// Returns the field alignment, assuming the union is not packed. -/// Keep implementation in sync with `Sema.unionFieldAlignment`. -/// Prefer to call that function instead of this one during Sema. -pub fn unionFieldNormalAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment { - const ip = &mod.intern_pool; +/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. +pub fn unionFieldNormalAlignment(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment { + return zcu.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable; +} + +/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. +/// If `strat` is `.sema`, may perform type resolution. +pub fn unionFieldNormalAlignmentAdvanced(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32, strat: Type.ResolveStrat) SemaError!Alignment { + const ip = &zcu.intern_pool; + assert(loaded_union.flagsPtr(ip).layout != .@"packed"); const field_align = loaded_union.fieldAlign(ip, field_index); if (field_align != .none) return field_align; const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); - return field_ty.abiAlignment(mod); + if (field_ty.isNoReturn(zcu)) return .none; + return (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; } /// Returns the index of the active field, given the current tag value @@ -6387,41 +6415,37 @@ pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern()); } -/// Returns the field alignment of a non-packed struct in byte units. -/// Keep implementation in sync with `Sema.structFieldAlignment`. -/// asserts the layout is not packed. +/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. pub fn structFieldAlignment( - mod: *Module, + zcu: *Zcu, explicit_alignment: InternPool.Alignment, field_ty: Type, layout: std.builtin.Type.ContainerLayout, ) Alignment { + return zcu.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable; +} + +/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. +/// If `strat` is `.sema`, may perform type resolution. 
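+/// Note: under `extern` layout (and `auto` layout when targeting the C object format),
+/// integer fields of 128 bits or more get at least 16-byte alignment, as the C ABI
+/// requires; see the `isAbiInt` check below.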
+pub fn structFieldAlignmentAdvanced( + zcu: *Zcu, + explicit_alignment: InternPool.Alignment, + field_ty: Type, + layout: std.builtin.Type.ContainerLayout, + strat: Type.ResolveStrat, +) SemaError!Alignment { assert(layout != .@"packed"); if (explicit_alignment != .none) return explicit_alignment; + const ty_abi_align = (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; switch (layout) { .@"packed" => unreachable, - .auto => { - if (mod.getTarget().ofmt == .c) { - return structFieldAlignmentExtern(mod, field_ty); - } else { - return field_ty.abiAlignment(mod); - } - }, - .@"extern" => return structFieldAlignmentExtern(mod, field_ty), + .auto => if (zcu.getTarget().ofmt != .c) return ty_abi_align, + .@"extern" => {}, } -} - -/// Returns the field alignment of an extern struct in byte units. -/// This logic is duplicated in Type.abiAlignmentAdvanced. -pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment { - const ty_abi_align = field_ty.abiAlignment(mod); - - if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) { - // The C ABI requires 128 bit integer fields of structs - // to be 16-bytes aligned. - return ty_abi_align.max(.@"16"); + // extern + if (field_ty.isAbiInt(zcu) and field_ty.intInfo(zcu).bits >= 128) { + return ty_abi_align.maxStrict(.@"16"); } - return ty_abi_align; } @@ -6480,3 +6504,29 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, Resolved return result; } + +pub fn getBuiltin(zcu: *Zcu, name: []const u8) Allocator.Error!Air.Inst.Ref { + const decl_index = try zcu.getBuiltinDecl(name); + zcu.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt"); + return Air.internedToRef(zcu.declPtr(decl_index).val.toIntern()); +} + +pub fn getBuiltinDecl(zcu: *Zcu, name: []const u8) Allocator.Error!InternPool.DeclIndex { + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const std_file = (zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig")).file; + const std_namespace = zcu.declPtr(std_file.root_decl.unwrap().?).getOwnedInnerNamespace(zcu).?; + const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); + zcu.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt"); + const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt"); + const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls); + return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt"); +} + +pub fn getBuiltinType(zcu: *Zcu, name: []const u8) Allocator.Error!Type { + const ty_inst = try zcu.getBuiltin(name); + const ty = Type.fromInterned(ty_inst.toInterned() orelse @panic("std.builtin is corrupt")); + ty.resolveFully(zcu) catch @panic("std.builtin is corrupt"); + return ty; +} diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b3718db5b1..02933929c8 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2603,7 +2603,10 @@ pub const Object = struct { if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; const field_size = Type.fromInterned(field_ty).abiSize(mod); - const field_align = mod.unionFieldNormalAlignment(union_type, @intCast(field_index)); + const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) { + 
.@"packed" => .none, + .auto, .@"extern" => mod.unionFieldNormalAlignment(union_type, @intCast(field_index)), + }; const field_name = tag_type.names.get(ip)[field_index]; fields.appendAssumeCapacity(try o.builder.debugMemberType( diff --git a/src/print_value.zig b/src/print_value.zig index d2952c3d8e..394f021049 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -81,12 +81,12 @@ pub fn print( }), .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}), - .lazy_align => |ty| if (opt_sema) |sema| { - const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar; + .lazy_align => |ty| if (opt_sema != null) { + const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .sema)).scalar; try writer.print("{}", .{a.toByteUnits() orelse 0}); } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}), - .lazy_size => |ty| if (opt_sema) |sema| { - const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar; + .lazy_size => |ty| if (opt_sema != null) { + const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .sema)).scalar; try writer.print("{}", .{s}); } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(mod)}), }, diff --git a/test/cases/compile_errors/direct_struct_loop.zig b/test/cases/compile_errors/direct_struct_loop.zig index 9fdda1bdc7..1eed8aad53 100644 --- a/test/cases/compile_errors/direct_struct_loop.zig +++ b/test/cases/compile_errors/direct_struct_loop.zig @@ -10,4 +10,3 @@ export fn entry() usize { // target=native // // :1:11: error: struct 'tmp.A' depends on itself -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/indirect_struct_loop.zig b/test/cases/compile_errors/indirect_struct_loop.zig index ef5526830e..02ec65f5ab 100644 --- a/test/cases/compile_errors/indirect_struct_loop.zig +++ b/test/cases/compile_errors/indirect_struct_loop.zig @@ -16,6 +16,3 @@ export fn entry() usize { // target=native // // :1:11: error: struct 'tmp.A' depends on itself -// :8:5: note: while checking this field -// :5:5: note: while checking this field -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig index 74cafabe7c..11dd93d01e 100644 --- a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig +++ b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig @@ -13,4 +13,3 @@ export fn entry() usize { // target=native // // :1:13: error: struct 'tmp.Foo' depends on itself -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig index 6030ca4d3e..8e499ab7e2 100644 --- a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig +++ b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig @@ -13,4 +13,3 @@ export fn entry() usize { // target=native // // :1:13: error: union 'tmp.Foo' depends on itself -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/invalid_dependency_on_struct_size.zig 
b/test/cases/compile_errors/invalid_dependency_on_struct_size.zig index 02ea7e2710..98c6224626 100644 --- a/test/cases/compile_errors/invalid_dependency_on_struct_size.zig +++ b/test/cases/compile_errors/invalid_dependency_on_struct_size.zig @@ -16,4 +16,3 @@ comptime { // target=native // // :6:21: error: struct layout depends on it having runtime bits -// :4:13: note: while checking this field diff --git a/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig b/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig index cad779e3d7..6a4cba82a6 100644 --- a/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig +++ b/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig @@ -15,5 +15,3 @@ export fn entry() void { // target=native // // :1:17: error: struct 'tmp.LhsExpr' depends on itself -// :5:5: note: while checking this field -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig b/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig index f5647625dd..a0a6d37042 100644 --- a/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig +++ b/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig @@ -1,5 +1,5 @@ pub export fn entry(param: usize) usize { - return struct { param }; + return struct { @TypeOf(param) }; } // error diff --git a/test/src/Cases.zig b/test/src/Cases.zig index b8a3260ad6..dbf409f53b 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -395,10 +395,7 @@ fn addFromDirInner( if (entry.kind != .file) continue; // Ignore stuff such as .swp files - switch (Compilation.classifyFileExt(entry.basename)) { - .unknown => continue, - else => {}, - } + if (!knownFileExtension(entry.basename)) continue; try filenames.append(try ctx.arena.dupe(u8, entry.path)); } @@ -623,8 +620,6 @@ pub fn lowerToBuildSteps( b: *std.Build, parent_step: *std.Build.Step, test_filters: []const []const u8, - cases_dir_path: []const u8, - incremental_exe: *std.Build.Step.Compile, ) void { const host = std.zig.system.resolveTargetQuery(.{}) catch |err| std.debug.panic("unable to detect native host: {s}\n", .{@errorName(err)}); @@ -637,20 +632,11 @@ pub fn lowerToBuildSteps( // compilation is in a happier state. continue; } - for (test_filters) |test_filter| { - if (std.mem.indexOf(u8, incr_case.base_path, test_filter)) |_| break; - } else if (test_filters.len > 0) continue; - const case_base_path_with_dir = std.fs.path.join(b.allocator, &.{ - cases_dir_path, incr_case.base_path, - }) catch @panic("OOM"); - const run = b.addRunArtifact(incremental_exe); - run.setName(incr_case.base_path); - run.addArgs(&.{ - case_base_path_with_dir, - b.graph.zig_exe, - }); - run.expectStdOutEqual(""); - parent_step.dependOn(&run.step); + // TODO: the logic for running these was bad, so I've ripped it out. Rewrite this + // in a way that actually spawns the compiler, communicating with it over the + // compiler server protocol. 
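+ // Until that rewrite lands, reaching an incremental case here is a hard error: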
+ _ = incr_case; + @panic("TODO implement incremental test case executor"); } for (self.cases.items) |case| { @@ -1236,192 +1222,6 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const getExternalExecutor = std.zig.system.getExternalExecutor; -const Compilation = @import("../../src/Compilation.zig"); -const zig_h = @import("../../src/link.zig").File.C.zig_h; -const introspect = @import("../../src/introspect.zig"); -const ThreadPool = std.Thread.Pool; -const WaitGroup = std.Thread.WaitGroup; -const build_options = @import("build_options"); -const Package = @import("../../src/Package.zig"); - -pub const std_options = .{ - .log_level = .err, -}; - -var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{ - .stack_trace_frames = build_options.mem_leak_frames, -}){}; - -// TODO: instead of embedding the compiler in this process, spawn the compiler -// as a sub-process and communicate the updates using the compiler protocol. -pub fn main() !void { - const use_gpa = build_options.force_gpa or !builtin.link_libc; - const gpa = gpa: { - if (use_gpa) { - break :gpa general_purpose_allocator.allocator(); - } - // We would prefer to use raw libc allocator here, but cannot - // use it if it won't support the alignment we need. - if (@alignOf(std.c.max_align_t) < @alignOf(i128)) { - break :gpa std.heap.c_allocator; - } - break :gpa std.heap.raw_c_allocator; - }; - - var single_threaded_arena = std.heap.ArenaAllocator.init(gpa); - defer single_threaded_arena.deinit(); - - var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ - .child_allocator = single_threaded_arena.allocator(), - }; - const arena = thread_safe_arena.allocator(); - - const args = try std.process.argsAlloc(arena); - const case_file_path = args[1]; - const zig_exe_path = args[2]; - - var filenames = std.ArrayList([]const u8).init(arena); - - const case_dirname = std.fs.path.dirname(case_file_path).?; - var iterable_dir = try std.fs.cwd().openDir(case_dirname, .{ .iterate = true }); - defer iterable_dir.close(); - - if (std.mem.endsWith(u8, case_file_path, ".0.zig")) { - const stem = case_file_path[case_dirname.len + 1 .. 
case_file_path.len - "0.zig".len]; - var it = iterable_dir.iterate(); - while (try it.next()) |entry| { - if (entry.kind != .file) continue; - if (!std.mem.startsWith(u8, entry.name, stem)) continue; - try filenames.append(try std.fs.path.join(arena, &.{ case_dirname, entry.name })); - } - } else { - try filenames.append(case_file_path); - } - - if (filenames.items.len == 0) { - std.debug.print("failed to find the input source file(s) from '{s}'\n", .{ - case_file_path, - }); - std.process.exit(1); - } - - // Sort filenames, so that incremental tests are contiguous and in-order - sortTestFilenames(filenames.items); - - var ctx = Cases.init(gpa, arena); - - var test_it = TestIterator{ .filenames = filenames.items }; - while (try test_it.next()) |batch| { - const strategy: TestStrategy = if (batch.len > 1) .incremental else .independent; - var cases = std.ArrayList(usize).init(arena); - - for (batch) |filename| { - const max_file_size = 10 * 1024 * 1024; - const src = try iterable_dir.readFileAllocOptions(arena, filename, max_file_size, null, 1, 0); - - // Parse the manifest - var manifest = try TestManifest.parse(arena, src); - - if (cases.items.len == 0) { - const backends = try manifest.getConfigForKeyAlloc(arena, "backend", Backend); - const targets = try manifest.getConfigForKeyAlloc(arena, "target", std.Target.Query); - const c_frontends = try manifest.getConfigForKeyAlloc(ctx.arena, "c_frontend", CFrontend); - const is_test = try manifest.getConfigForKeyAssertSingle("is_test", bool); - const link_libc = try manifest.getConfigForKeyAssertSingle("link_libc", bool); - const output_mode = try manifest.getConfigForKeyAssertSingle("output_mode", std.builtin.OutputMode); - - if (manifest.type == .translate_c) { - for (c_frontends) |c_frontend| { - for (targets) |target_query| { - const output = try manifest.trailingLinesSplit(ctx.arena); - try ctx.translate.append(.{ - .name = std.fs.path.stem(filename), - .c_frontend = c_frontend, - .target = resolveTargetQuery(target_query), - .is_test = is_test, - .link_libc = link_libc, - .input = src, - .kind = .{ .translate = output }, - }); - } - } - continue; - } - if (manifest.type == .run_translated_c) { - for (c_frontends) |c_frontend| { - for (targets) |target_query| { - const output = try manifest.trailingSplit(ctx.arena); - try ctx.translate.append(.{ - .name = std.fs.path.stem(filename), - .c_frontend = c_frontend, - .target = resolveTargetQuery(target_query), - .is_test = is_test, - .link_libc = link_libc, - .output = output, - .input = src, - .kind = .{ .run = output }, - }); - } - } - continue; - } - - // Cross-product to get all possible test combinations - for (backends) |backend| { - for (targets) |target| { - const next = ctx.cases.items.len; - try ctx.cases.append(.{ - .name = std.fs.path.stem(filename), - .target = target, - .backend = backend, - .updates = std.ArrayList(Cases.Update).init(ctx.cases.allocator), - .is_test = is_test, - .output_mode = output_mode, - .link_libc = backend == .llvm, - .deps = std.ArrayList(DepModule).init(ctx.cases.allocator), - }); - try cases.append(next); - } - } - } - - for (cases.items) |case_index| { - const case = &ctx.cases.items[case_index]; - if (strategy == .incremental and case.backend == .stage2 and case.target.getCpuArch() == .x86_64 and !case.link_libc and case.target.getOsTag() != .plan9) { - // https://github.com/ziglang/zig/issues/15174 - continue; - } - - switch (manifest.type) { - .compile => { - case.addCompile(src); - }, - .@"error" => { - const errors = try 
manifest.trailingLines(arena); - switch (strategy) { - .independent => { - case.addError(src, errors); - }, - .incremental => { - case.addErrorNamed("update", src, errors); - }, - } - }, - .run => { - const output = try manifest.trailingSplit(ctx.arena); - case.addCompareOutput(src, output); - }, - .translate_c => @panic("c_frontend specified for compile case"), - .run_translated_c => @panic("c_frontend specified for compile case"), - .cli => @panic("TODO cli tests"), - } - } - } - } - - return runCases(&ctx, zig_exe_path); -} - fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget { return .{ .query = query, @@ -1430,470 +1230,33 @@ fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget { }; } -fn runCases(self: *Cases, zig_exe_path: []const u8) !void { - const host = try std.zig.system.resolveTargetQuery(.{}); - - var progress = std.Progress{}; - const root_node = progress.start("compiler", self.cases.items.len); - progress.terminal = null; - defer root_node.end(); - - var zig_lib_directory = try introspect.findZigLibDirFromSelfExe(self.gpa, zig_exe_path); - defer zig_lib_directory.handle.close(); - defer self.gpa.free(zig_lib_directory.path.?); - - var aux_thread_pool: ThreadPool = undefined; - try aux_thread_pool.init(.{ .allocator = self.gpa }); - defer aux_thread_pool.deinit(); - - // Use the same global cache dir for all the tests, such that we for example don't have to - // rebuild musl libc for every case (when LLVM backend is enabled). - var global_tmp = std.testing.tmpDir(.{}); - defer global_tmp.cleanup(); - - var cache_dir = try global_tmp.dir.makeOpenPath(".zig-cache", .{}); - defer cache_dir.close(); - const tmp_dir_path = try std.fs.path.join(self.gpa, &[_][]const u8{ ".", ".zig-cache", "tmp", &global_tmp.sub_path }); - defer self.gpa.free(tmp_dir_path); - - const global_cache_directory: Compilation.Directory = .{ - .handle = cache_dir, - .path = try std.fs.path.join(self.gpa, &[_][]const u8{ tmp_dir_path, ".zig-cache" }), - }; - defer self.gpa.free(global_cache_directory.path.?); - - { - for (self.cases.items) |*case| { - if (build_options.skip_non_native) { - if (case.target.getCpuArch() != builtin.cpu.arch) - continue; - if (case.target.getObjectFormat() != builtin.object_format) - continue; - } - - // Skip tests that require LLVM backend when it is not available - if (!build_options.have_llvm and case.backend == .llvm) - continue; - - assert(case.backend != .stage1); - - for (build_options.test_filters) |test_filter| { - if (std.mem.indexOf(u8, case.name, test_filter)) |_| break; - } else if (build_options.test_filters.len > 0) continue; - - var prg_node = root_node.start(case.name, case.updates.items.len); - prg_node.activate(); - defer prg_node.end(); - - try runOneCase( - self.gpa, - &prg_node, - case.*, - zig_lib_directory, - zig_exe_path, - &aux_thread_pool, - global_cache_directory, - host, - ); - } - - for (self.translate.items) |*case| { - _ = case; - @panic("TODO is this even used?"); - } - } -} - -fn runOneCase( - allocator: Allocator, - root_node: *std.Progress.Node, - case: Case, - zig_lib_directory: Compilation.Directory, - zig_exe_path: []const u8, - thread_pool: *ThreadPool, - global_cache_directory: Compilation.Directory, - host: std.Target, -) !void { - const tmp_src_path = "tmp.zig"; - const enable_rosetta = build_options.enable_rosetta; - const enable_qemu = build_options.enable_qemu; - const enable_wine = build_options.enable_wine; - const enable_wasmtime = build_options.enable_wasmtime; - const enable_darling = 
build_options.enable_darling; - const glibc_runtimes_dir: ?[]const u8 = build_options.glibc_runtimes_dir; - - const target = try std.zig.system.resolveTargetQuery(case.target); - - var arena_allocator = std.heap.ArenaAllocator.init(allocator); - defer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - - var tmp = std.testing.tmpDir(.{}); - defer tmp.cleanup(); - - var cache_dir = try tmp.dir.makeOpenPath(".zig-cache", .{}); - defer cache_dir.close(); - - const tmp_dir_path = try std.fs.path.join( - arena, - &[_][]const u8{ ".", ".zig-cache", "tmp", &tmp.sub_path }, - ); - const local_cache_path = try std.fs.path.join( - arena, - &[_][]const u8{ tmp_dir_path, ".zig-cache" }, - ); - - const zig_cache_directory: Compilation.Directory = .{ - .handle = cache_dir, - .path = local_cache_path, - }; - - var main_pkg: Package = .{ - .root_src_directory = .{ .path = tmp_dir_path, .handle = tmp.dir }, - .root_src_path = tmp_src_path, - }; - defer { - var it = main_pkg.table.iterator(); - while (it.next()) |kv| { - allocator.free(kv.key_ptr.*); - kv.value_ptr.*.destroy(allocator); - } - main_pkg.table.deinit(allocator); - } - - for (case.deps.items) |dep| { - var pkg = try Package.create( - allocator, - tmp_dir_path, - dep.path, - ); - errdefer pkg.destroy(allocator); - try main_pkg.add(allocator, dep.name, pkg); +fn knownFileExtension(filename: []const u8) bool { + // List taken from `Compilation.classifyFileExt` in the compiler. + for ([_][]const u8{ + ".c", ".C", ".cc", ".cpp", + ".cxx", ".stub", ".m", ".mm", + ".ll", ".bc", ".s", ".S", + ".h", ".zig", ".so", ".dll", + ".dylib", ".tbd", ".a", ".lib", + ".o", ".obj", ".cu", ".def", + ".rc", ".res", ".manifest", + }) |ext| { + if (std.mem.endsWith(u8, filename, ext)) return true; } - - const bin_name = try std.zig.binNameAlloc(arena, .{ - .root_name = "test_case", - .target = target, - .output_mode = case.output_mode, - }); - - const emit_directory: Compilation.Directory = .{ - .path = tmp_dir_path, - .handle = tmp.dir, - }; - const emit_bin: Compilation.EmitLoc = .{ - .directory = emit_directory, - .basename = bin_name, - }; - const emit_h: ?Compilation.EmitLoc = if (case.emit_h) .{ - .directory = emit_directory, - .basename = "test_case.h", - } else null; - const use_llvm: bool = switch (case.backend) { - .llvm => true, - else => false, - }; - const comp = try Compilation.create(allocator, .{ - .local_cache_directory = zig_cache_directory, - .global_cache_directory = global_cache_directory, - .zig_lib_directory = zig_lib_directory, - .thread_pool = thread_pool, - .root_name = "test_case", - .target = target, - // TODO: support tests for object file building, and library builds - // and linking. This will require a rework to support multi-file - // tests. 
- .output_mode = case.output_mode, - .is_test = case.is_test, - .optimize_mode = case.optimize_mode, - .emit_bin = emit_bin, - .emit_h = emit_h, - .main_pkg = &main_pkg, - .keep_source_files_loaded = true, - .is_native_os = case.target.isNativeOs(), - .is_native_abi = case.target.isNativeAbi(), - .dynamic_linker = target.dynamic_linker.get(), - .link_libc = case.link_libc, - .use_llvm = use_llvm, - .self_exe_path = zig_exe_path, - // TODO instead of turning off color, pass in a std.Progress.Node - .color = .off, - .reference_trace = 0, - // TODO: force self-hosted linkers with stage2 backend to avoid LLD creeping in - // until the auto-select mechanism deems them worthy - .use_lld = switch (case.backend) { - .stage2 => false, - else => null, - }, - }); - defer comp.destroy(); - - update: for (case.updates.items, 0..) |update, update_index| { - var update_node = root_node.start(update.name, 3); - update_node.activate(); - defer update_node.end(); - - var sync_node = update_node.start("write", 0); - sync_node.activate(); - for (update.files.items) |file| { - try tmp.dir.writeFile(.{ .sub_path = file.path, .data = file.src }); - } - sync_node.end(); - - var module_node = update_node.start("parse/analysis/codegen", 0); - module_node.activate(); - try comp.makeBinFileWritable(); - try comp.update(&module_node); - module_node.end(); - - if (update.case != .Error) { - var all_errors = try comp.getAllErrorsAlloc(); - defer all_errors.deinit(allocator); - if (all_errors.errorMessageCount() > 0) { - all_errors.renderToStdErr(.{ - .ttyconf = std.io.tty.detectConfig(std.io.getStdErr()), - }); - // TODO print generated C code - return error.UnexpectedCompileErrors; - } - } - - switch (update.case) { - .Header => |expected_output| { - var file = try tmp.dir.openFile("test_case.h", .{ .mode = .read_only }); - defer file.close(); - const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024); - - try std.testing.expectEqualStrings(expected_output, out); - }, - .CompareObjectFile => |expected_output| { - var file = try tmp.dir.openFile(bin_name, .{ .mode = .read_only }); - defer file.close(); - const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024); - - try std.testing.expectEqualStrings(expected_output, out); - }, - .Compile => {}, - .Error => |expected_errors| { - var test_node = update_node.start("assert", 0); - test_node.activate(); - defer test_node.end(); - - var error_bundle = try comp.getAllErrorsAlloc(); - defer error_bundle.deinit(allocator); - - if (error_bundle.errorMessageCount() == 0) { - return error.ExpectedCompilationErrors; - } - - var actual_stderr = std.ArrayList(u8).init(arena); - try error_bundle.renderToWriter(.{ - .ttyconf = .no_color, - .include_reference_trace = false, - .include_source_line = false, - }, actual_stderr.writer()); - - // Render the expected lines into a string that we can compare verbatim. 
- var expected_generated = std.ArrayList(u8).init(arena); - - var actual_line_it = std.mem.splitScalar(u8, actual_stderr.items, '\n'); - for (expected_errors) |expect_line| { - const actual_line = actual_line_it.next() orelse { - try expected_generated.appendSlice(expect_line); - try expected_generated.append('\n'); - continue; - }; - if (std.mem.endsWith(u8, actual_line, expect_line)) { - try expected_generated.appendSlice(actual_line); - try expected_generated.append('\n'); - continue; - } - if (std.mem.startsWith(u8, expect_line, ":?:?: ")) { - if (std.mem.endsWith(u8, actual_line, expect_line[":?:?: ".len..])) { - try expected_generated.appendSlice(actual_line); - try expected_generated.append('\n'); - continue; - } - } - try expected_generated.appendSlice(expect_line); - try expected_generated.append('\n'); - } - - try std.testing.expectEqualStrings(expected_generated.items, actual_stderr.items); - }, - .Execution => |expected_stdout| { - if (!std.process.can_spawn) { - std.debug.print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)}); - continue :update; // Pass test. - } - - update_node.setEstimatedTotalItems(4); - - var argv = std.ArrayList([]const u8).init(allocator); - defer argv.deinit(); - - const exec_result = x: { - var exec_node = update_node.start("execute", 0); - exec_node.activate(); - defer exec_node.end(); - - // We go out of our way here to use the unique temporary directory name in - // the exe_path so that it makes its way into the cache hash, avoiding - // cache collisions from multiple threads doing `zig run` at the same time - // on the same test_case.c input filename. - const ss = std.fs.path.sep_str; - const exe_path = try std.fmt.allocPrint( - arena, - ".." ++ ss ++ "{s}" ++ ss ++ "{s}", - .{ &tmp.sub_path, bin_name }, - ); - if (case.target.ofmt != null and case.target.ofmt.? == .c) { - if (getExternalExecutor(host, &target, .{ .link_libc = true }) != .native) { - // We wouldn't be able to run the compiled C code. - continue :update; // Pass test. - } - try argv.appendSlice(&[_][]const u8{ - zig_exe_path, - "run", - "-cflags", - "-std=c99", - "-pedantic", - "-Werror", - "-Wno-incompatible-library-redeclaration", // https://github.com/ziglang/zig/issues/875 - "--", - "-lc", - exe_path, - }); - if (zig_lib_directory.path) |p| { - try argv.appendSlice(&.{ "-I", p }); - } - } else switch (getExternalExecutor(host, &target, .{ .link_libc = case.link_libc })) { - .native => { - if (case.backend == .stage2 and case.target.getCpuArch().isArmOrThumb()) { - // https://github.com/ziglang/zig/issues/13623 - continue :update; // Pass test. - } - try argv.append(exe_path); - }, - .bad_dl, .bad_os_or_cpu => continue :update, // Pass test. - - .rosetta => if (enable_rosetta) { - try argv.append(exe_path); - } else { - continue :update; // Rosetta not available, pass test. - }, - - .qemu => |qemu_bin_name| if (enable_qemu) { - const need_cross_glibc = target.isGnuLibC() and case.link_libc; - const glibc_dir_arg: ?[]const u8 = if (need_cross_glibc) - glibc_runtimes_dir orelse continue :update // glibc dir not available; pass test - else - null; - try argv.append(qemu_bin_name); - if (glibc_dir_arg) |dir| { - const linux_triple = try target.linuxTriple(arena); - const full_dir = try std.fs.path.join(arena, &[_][]const u8{ - dir, - linux_triple, - }); - - try argv.append("-L"); - try argv.append(full_dir); - } - try argv.append(exe_path); - } else { - continue :update; // QEMU not available; pass test. 
- }, - - .wine => |wine_bin_name| if (enable_wine) { - try argv.append(wine_bin_name); - try argv.append(exe_path); - } else { - continue :update; // Wine not available; pass test. - }, - - .wasmtime => |wasmtime_bin_name| if (enable_wasmtime) { - try argv.append(wasmtime_bin_name); - try argv.append("--dir=."); - try argv.append(exe_path); - } else { - continue :update; // wasmtime not available; pass test. - }, - - .darling => |darling_bin_name| if (enable_darling) { - try argv.append(darling_bin_name); - // Since we use relative to cwd here, we invoke darling with - // "shell" subcommand. - try argv.append("shell"); - try argv.append(exe_path); - } else { - continue :update; // Darling not available; pass test. - }, - } - - try comp.makeBinFileExecutable(); - - while (true) { - break :x std.process.Child.run(.{ - .allocator = allocator, - .argv = argv.items, - .cwd_dir = tmp.dir, - .cwd = tmp_dir_path, - }) catch |err| switch (err) { - error.FileBusy => { - // There is a fundamental design flaw in Unix systems with how - // ETXTBSY interacts with fork+exec. - // https://github.com/golang/go/issues/22315 - // https://bugs.openjdk.org/browse/JDK-8068370 - // Unfortunately, this could be a real error, but we can't - // tell the difference here. - continue; - }, - else => { - std.debug.print("\n{s}.{d} The following command failed with {s}:\n", .{ - case.name, update_index, @errorName(err), - }); - dumpArgs(argv.items); - return error.ChildProcessExecution; - }, - }; - } - }; - var test_node = update_node.start("test", 0); - test_node.activate(); - defer test_node.end(); - defer allocator.free(exec_result.stdout); - defer allocator.free(exec_result.stderr); - switch (exec_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("\n{s}\n{s}: execution exited with code {d}:\n", .{ - exec_result.stderr, case.name, code, - }); - dumpArgs(argv.items); - return error.ChildProcessExecution; - } - }, - else => { - std.debug.print("\n{s}\n{s}: execution crashed:\n", .{ - exec_result.stderr, case.name, - }); - dumpArgs(argv.items); - return error.ChildProcessExecution; - }, - } - try std.testing.expectEqualStrings(expected_stdout, exec_result.stdout); - // We allow stderr to have garbage in it because wasmtime prints a - // warning about --invoke even though we don't pass it. - //std.testing.expectEqualStrings("", exec_result.stderr); - }, - } - } -} - -fn dumpArgs(argv: []const []const u8) void { - for (argv) |arg| { - std.debug.print("{s} ", .{arg}); + // Final check for .so.X, .so.X.Y, .so.X.Y.Z. + // From `Compilation.hasSharedLibraryExt`. 
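+ // e.g. "libfoo.so.1", "libfoo.so.1.2", and "libfoo.so.1.2.3" are accepted here;
+ // a non-numeric component, or more than three version components, is not.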
+    var it = std.mem.splitScalar(u8, filename, '.');
+    _ = it.first();
+    var so_txt = it.next() orelse return false;
+    while (!std.mem.eql(u8, so_txt, "so")) {
+        so_txt = it.next() orelse return false;
+    }
+    const n1 = it.next() orelse return false;
+    const n2 = it.next();
+    const n3 = it.next();
+    _ = std.fmt.parseInt(u32, n1, 10) catch return false;
+    if (n2) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
+    if (n3) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
+    if (it.next() != null) return false;
+    return true;
+}
diff --git a/test/tests.zig b/test/tests.zig
index 2202936d59..95a86c68f6 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -1250,7 +1250,6 @@ pub fn addCases(
     b: *std.Build,
     parent_step: *Step,
     test_filters: []const []const u8,
-    check_case_exe: *std.Build.Step.Compile,
     target: std.Build.ResolvedTarget,
     translate_c_options: @import("src/Cases.zig").TranslateCOptions,
     build_options: @import("cases.zig").BuildOptions,
@@ -1268,12 +1267,9 @@ pub fn addCases(
     cases.lowerToTranslateCSteps(b, parent_step, test_filters, target, translate_c_options);
-    const cases_dir_path = try b.build_root.join(b.allocator, &.{ "test", "cases" });
     cases.lowerToBuildSteps(
         b,
         parent_step,
         test_filters,
-        cases_dir_path,
-        check_case_exe,
     );
 }
--
cgit v1.2.3


From 00da182e6875845d5727c399b3738a13b262832e Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 30 Jun 2024 00:11:51 -0400
Subject: cbe: fix for export changes

---
 lib/zig.h | 8 +-
 src/Compilation.zig | 3 +
 src/Zcu.zig | 14 ++
 src/codegen/c.zig | 378 ++++++++++++++++++++--------------------------------
 src/link.zig | 1 -
 src/link/C.zig | 158 ++++++++++++++++------
 6 files changed, 286 insertions(+), 276 deletions(-)

(limited to 'src/codegen')

diff --git a/lib/zig.h b/lib/zig.h
index 1171c7efac..f3b3897186 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -207,16 +207,16 @@ typedef char bool;
     __asm(zig_mangle_c(name) " = " zig_mangle_c(symbol))
 #endif
 
+#define zig_mangled_tentative zig_mangled
+#define zig_mangled_final zig_mangled
 #if _MSC_VER
-#define zig_mangled_tentative(mangled, unmangled)
-#define zig_mangled_final(mangled, unmangled) ; \
+#define zig_mangled(mangled, unmangled) ; \
     zig_export(#mangled, unmangled)
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_export(unmangled, #mangled) \
     zig_export(symbol, unmangled)
 #else /* _MSC_VER */
-#define zig_mangled_tentative(mangled, unmangled) __asm(zig_mangle_c(unmangled))
-#define zig_mangled_final(mangled, unmangled) zig_mangled_tentative(mangled, unmangled)
+#define zig_mangled(mangled, unmangled) __asm(zig_mangle_c(unmangled))
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_mangled_final(mangled, unmangled) \
     zig_export(symbol, unmangled)
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 7447d589fd..185a9a6260 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -3466,6 +3466,9 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
             };
         },
         .emit_h_decl => |decl_index| {
+            if (true) @panic("regressed compiler feature: emit-h should hook into updateExports, " ++
+                "not decl analysis, which is too early to know about @export calls");
+
             const module = comp.module.?;
             const decl = module.declPtr(decl_index);
 
diff --git a/src/Zcu.zig b/src/Zcu.zig
index e3b85e957d..adfe60e678 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -268,6 +268,20 @@ pub const Exported = union(enum) {
     decl_index: Decl.Index,
     /// Constant value being exported.
value: InternPool.Index, + + pub fn getValue(exported: Exported, zcu: *Zcu) Value { + return switch (exported) { + .decl_index => |decl_index| zcu.declPtr(decl_index).val, + .value => |value| Value.fromInterned(value), + }; + } + + pub fn getAlign(exported: Exported, zcu: *Zcu) Alignment { + return switch (exported) { + .decl_index => |decl_index| zcu.declPtr(decl_index).alignment, + .value => .none, + }; + } }; pub const Export = struct { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 13d9e67519..92e9edb433 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -731,8 +731,6 @@ pub const DeclGen = struct { if (decl.val.getExternFunc(zcu)) |extern_func| if (extern_func.decl != decl_index) return dg.renderDeclValue(writer, extern_func.decl, location); - if (decl.val.getVariable(zcu)) |variable| try dg.renderFwdDecl(decl_index, variable, .tentative); - // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug @@ -748,7 +746,7 @@ pub const DeclGen = struct { try writer.writeByte(')'); } try writer.writeByte('&'); - try dg.renderDeclName(writer, decl_index, 0); + try dg.renderDeclName(writer, decl_index); if (need_cast) try writer.writeByte(')'); } @@ -1765,19 +1763,22 @@ pub const DeclGen = struct { fn renderFunctionSignature( dg: *DeclGen, w: anytype, - fn_decl_index: InternPool.DeclIndex, + fn_val: Value, + fn_align: InternPool.Alignment, kind: CType.Kind, name: union(enum) { - export_index: u32, - ident: []const u8, + decl: InternPool.DeclIndex, fmt_ctype_pool_string: std.fmt.Formatter(formatCTypePoolString), + @"export": struct { + main_name: InternPool.NullTerminatedString, + extern_name: InternPool.NullTerminatedString, + }, }, ) !void { const zcu = dg.zcu; const ip = &zcu.intern_pool; - const fn_decl = zcu.declPtr(fn_decl_index); - const fn_ty = fn_decl.typeOf(zcu); + const fn_ty = fn_val.typeOf(zcu); const fn_ctype = try dg.ctypeFromType(fn_ty, kind); const fn_info = zcu.typeToFunc(fn_ty).?; @@ -1788,7 +1789,7 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold) + if (fn_val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold) try w.writeAll("zig_cold "); if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); @@ -1799,22 +1800,11 @@ pub const DeclGen = struct { trailing = .maybe_space; } - switch (kind) { - .forward => {}, - .complete => if (fn_decl.alignment.toByteUnits()) |a| { - try w.print("{}zig_align_fn({})", .{ trailing, a }); - trailing = .maybe_space; - }, - else => unreachable, - } - + try w.print("{}", .{trailing}); switch (name) { - .export_index => |export_index| { - try w.print("{}", .{trailing}); - try dg.renderDeclName(w, fn_decl_index, export_index); - }, - .ident => |ident| try w.print("{}{ }", .{ trailing, fmtIdent(ident) }), - .fmt_ctype_pool_string => |fmt| try w.print("{}{ }", .{ trailing, fmt }), + .decl => |decl_index| try dg.renderDeclName(w, decl_index), + .fmt_ctype_pool_string => |fmt| try w.print("{ }", .{fmt}), + .@"export" => |@"export"| try w.print("{ }", .{fmtIdent(@"export".extern_name.toSlice(ip))}), } try renderTypeSuffix( @@ -1833,44 +1823,30 @@ pub const DeclGen = struct { switch (kind) { .forward => { - if (fn_decl.alignment.toByteUnits()) |a| { - try w.print(" zig_align_fn({})", .{a}); - } + if (fn_align.toByteUnits()) |a| try w.print(" zig_align_fn({})", .{a}); switch (name) { - 
.export_index => |export_index| mangled: { - const maybe_exports = zcu.decl_exports.get(fn_decl_index); - const external_name = (if (maybe_exports) |exports| - exports.items[export_index].opts.name - else if (fn_decl.isExtern(zcu)) - fn_decl.name - else - break :mangled).toSlice(ip); - const is_mangled = isMangledIdent(external_name, true); - const is_export = export_index > 0; + .decl, .fmt_ctype_pool_string => {}, + .@"export" => |@"export"| { + const extern_name = @"export".extern_name.toSlice(ip); + const is_mangled = isMangledIdent(extern_name, true); + const is_export = @"export".extern_name != @"export".main_name; if (is_mangled and is_export) { try w.print(" zig_mangled_export({ }, {s}, {s})", .{ - fmtIdent(external_name), - fmtStringLiteral(external_name, null), - fmtStringLiteral( - maybe_exports.?.items[0].opts.name.toSlice(ip), - null, - ), + fmtIdent(extern_name), + fmtStringLiteral(extern_name, null), + fmtStringLiteral(@"export".main_name.toSlice(ip), null), }); } else if (is_mangled) { - try w.print(" zig_mangled_final({ }, {s})", .{ - fmtIdent(external_name), fmtStringLiteral(external_name, null), + try w.print(" zig_mangled({ }, {s})", .{ + fmtIdent(extern_name), fmtStringLiteral(extern_name, null), }); } else if (is_export) { try w.print(" zig_export({s}, {s})", .{ - fmtStringLiteral( - maybe_exports.?.items[0].opts.name.toSlice(ip), - null, - ), - fmtStringLiteral(external_name, null), + fmtStringLiteral(@"export".main_name.toSlice(ip), null), + fmtStringLiteral(extern_name, null), }); } }, - .ident, .fmt_ctype_pool_string => {}, } }, .complete => {}, @@ -2085,21 +2061,11 @@ pub const DeclGen = struct { try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); } - fn declIsGlobal(dg: *DeclGen, val: Value) bool { - const zcu = dg.zcu; - return switch (zcu.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| zcu.decl_exports.contains(variable.decl), - .extern_func => true, - .func => |func| zcu.decl_exports.contains(func.owner_decl), - else => unreachable, - }; - } - fn writeName(dg: *DeclGen, w: anytype, c_value: CValue) !void { switch (c_value) { .new_local, .local => |i| try w.print("t{d}", .{i}), .constant => |val| try renderAnonDeclName(w, val), - .decl => |decl| try dg.renderDeclName(w, decl, 0), + .decl => |decl| try dg.renderDeclName(w, decl), .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}), else => unreachable, } @@ -2111,10 +2077,10 @@ pub const DeclGen = struct { .constant => |val| try renderAnonDeclName(w, val), .arg, .arg_array => unreachable, .field => |i| try w.print("f{d}", .{i}), - .decl => |decl| try dg.renderDeclName(w, decl, 0), + .decl => |decl| try dg.renderDeclName(w, decl), .decl_ref => |decl| { try w.writeByte('&'); - try dg.renderDeclName(w, decl, 0); + try dg.renderDeclName(w, decl); }, .undef => |ty| try dg.renderUndefValue(w, ty, .Other), .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}), @@ -2142,10 +2108,10 @@ pub const DeclGen = struct { .field => |i| try w.print("f{d}", .{i}), .decl => |decl| { try w.writeAll("(*"); - try dg.renderDeclName(w, decl, 0); + try dg.renderDeclName(w, decl); try w.writeByte(')'); }, - .decl_ref => |decl| try dg.renderDeclName(w, decl, 0), + .decl_ref => |decl| try dg.renderDeclName(w, decl), .undef => unreachable, .identifier => |ident| try w.print("(*{ })", .{fmtIdent(ident)}), .payload_identifier => |ident| try w.print("(*{ }.{ })", .{ @@ -2195,19 +2161,12 @@ pub const DeclGen = struct { dg: *DeclGen, decl_index: InternPool.DeclIndex, variable: 
InternPool.Key.Variable, - fwd_kind: enum { tentative, final }, ) !void { const zcu = dg.zcu; const decl = zcu.declPtr(decl_index); const fwd = dg.fwdDeclWriter(); - const is_global = variable.is_extern or dg.declIsGlobal(decl.val); - try fwd.writeAll(if (is_global) "zig_extern " else "static "); - const maybe_exports = zcu.decl_exports.get(decl_index); - const export_weak_linkage = if (maybe_exports) |exports| - exports.items[0].opts.linkage == .weak - else - false; - if (variable.is_weak_linkage or export_weak_linkage) try fwd.writeAll("zig_weak_linkage "); + try fwd.writeAll(if (variable.is_extern) "zig_extern " else "static "); + if (variable.is_weak_linkage) try fwd.writeAll("zig_weak_linkage "); if (variable.is_threadlocal and !dg.mod.single_threaded) try fwd.writeAll("zig_threadlocal "); try dg.renderTypeAndName( fwd, @@ -2217,38 +2176,17 @@ pub const DeclGen = struct { decl.alignment, .complete, ); - mangled: { - const external_name = (if (maybe_exports) |exports| - exports.items[0].opts.name - else if (variable.is_extern) - decl.name - else - break :mangled).toSlice(&zcu.intern_pool); - if (isMangledIdent(external_name, true)) { - try fwd.print(" zig_mangled_{s}({ }, {s})", .{ - @tagName(fwd_kind), - fmtIdent(external_name), - fmtStringLiteral(external_name, null), - }); - } - } try fwd.writeAll(";\n"); } - fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex, export_index: u32) !void { + fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex) !void { const zcu = dg.zcu; const ip = &zcu.intern_pool; const decl = zcu.declPtr(decl_index); - if (zcu.decl_exports.get(decl_index)) |exports| { - try writer.print("{ }", .{ - fmtIdent(exports.items[export_index].opts.name.toSlice(ip)), - }); - } else if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| { - try writer.print("{ }", .{ - fmtIdent(zcu.declPtr(extern_decl_index).name.toSlice(ip)), - }); - } else { + if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| try writer.print("{ }", .{ + fmtIdent(zcu.declPtr(extern_decl_index).name.toSlice(ip)), + }) else { // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), // expand to 3x the length of its input, but let's cut it off at a much shorter limit. var name: [100]u8 = undefined; @@ -2761,69 +2699,6 @@ pub fn genErrDecls(o: *Object) !void { try writer.writeAll("};\n"); } -fn genExports(o: *Object) !void { - const tracy = trace(@src()); - defer tracy.end(); - - const zcu = o.dg.zcu; - const ip = &zcu.intern_pool; - const decl_index = switch (o.dg.pass) { - .decl => |decl| decl, - .anon, .flush => return, - }; - const decl = zcu.declPtr(decl_index); - const fwd = o.dg.fwdDeclWriter(); - - const exports = zcu.decl_exports.get(decl_index) orelse return; - if (exports.items.len < 2) return; - - const is_variable_const = switch (ip.indexToKey(decl.val.toIntern())) { - .func => return for (exports.items[1..], 1..) 
|@"export", i| { - try fwd.writeAll("zig_extern "); - if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn "); - try o.dg.renderFunctionSignature( - fwd, - decl_index, - .forward, - .{ .export_index = @intCast(i) }, - ); - try fwd.writeAll(";\n"); - }, - .extern_func => { - // TODO: when sema allows re-exporting extern decls - unreachable; - }, - .variable => |variable| variable.is_const, - else => true, - }; - for (exports.items[1..]) |@"export"| { - try fwd.writeAll("zig_extern "); - if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage "); - const export_name = @"export".opts.name.toSlice(ip); - try o.dg.renderTypeAndName( - fwd, - decl.typeOf(zcu), - .{ .identifier = export_name }, - CQualifiers.init(.{ .@"const" = is_variable_const }), - decl.alignment, - .complete, - ); - if (isMangledIdent(export_name, true)) { - try fwd.print(" zig_mangled_export({ }, {s}, {s})", .{ - fmtIdent(export_name), - fmtStringLiteral(export_name, null), - fmtStringLiteral(exports.items[0].opts.name.toSlice(ip), null), - }); - } else { - try fwd.print(" zig_export({s}, {s})", .{ - fmtStringLiteral(exports.items[0].opts.name.toSlice(ip), null), - fmtStringLiteral(export_name, null), - }); - } - try fwd.writeAll(";\n"); - } -} - pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFnMap.Entry) !void { const zcu = o.dg.zcu; const ip = &zcu.intern_pool; @@ -2885,19 +2760,19 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn const fn_info = fn_ctype.info(ctype_pool).function; const fn_name = fmtCTypePoolString(val.fn_name, lazy_ctype_pool); - const fwd_decl_writer = o.dg.fwdDeclWriter(); - try fwd_decl_writer.print("static zig_{s} ", .{@tagName(key)}); - try o.dg.renderFunctionSignature(fwd_decl_writer, fn_decl_index, .forward, .{ + const fwd = o.dg.fwdDeclWriter(); + try fwd.print("static zig_{s} ", .{@tagName(key)}); + try o.dg.renderFunctionSignature(fwd, fn_decl.val, fn_decl.alignment, .forward, .{ .fmt_ctype_pool_string = fn_name, }); - try fwd_decl_writer.writeAll(";\n"); + try fwd.writeAll(";\n"); - try w.print("static zig_{s} ", .{@tagName(key)}); - try o.dg.renderFunctionSignature(w, fn_decl_index, .complete, .{ + try w.print("zig_{s} ", .{@tagName(key)}); + try o.dg.renderFunctionSignature(w, fn_decl.val, .none, .complete, .{ .fmt_ctype_pool_string = fn_name, }); try w.writeAll(" {\n return "); - try o.dg.renderDeclName(w, fn_decl_index, 0); + try o.dg.renderDeclName(w, fn_decl_index); try w.writeByte('('); for (0..fn_info.param_ctypes.len) |arg| { if (arg > 0) try w.writeAll(", "); @@ -2921,21 +2796,26 @@ pub fn genFunc(f: *Function) !void { o.code_header = std.ArrayList(u8).init(gpa); defer o.code_header.deinit(); - const is_global = o.dg.declIsGlobal(decl.val); - const fwd_decl_writer = o.dg.fwdDeclWriter(); - try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); - - if (zcu.decl_exports.get(decl_index)) |exports| - if (exports.items[0].opts.linkage == .weak) try fwd_decl_writer.writeAll("zig_weak_linkage_fn "); - try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 }); - try fwd_decl_writer.writeAll(";\n"); - try genExports(o); + const fwd = o.dg.fwdDeclWriter(); + try fwd.writeAll("static "); + try o.dg.renderFunctionSignature( + fwd, + decl.val, + decl.alignment, + .forward, + .{ .decl = decl_index }, + ); + try fwd.writeAll(";\n"); - try o.indent_writer.insertNewline(); - if (!is_global) try o.writer().writeAll("static "); if 
(decl.@"linksection".toSlice(&zcu.intern_pool)) |s| try o.writer().print("zig_linksection_fn({s}) ", .{fmtStringLiteral(s, null)}); - try o.dg.renderFunctionSignature(o.writer(), decl_index, .complete, .{ .export_index = 0 }); + try o.dg.renderFunctionSignature( + o.writer(), + decl.val, + .none, + .complete, + .{ .decl = decl_index }, + ); try o.writer().writeByte(' '); // In case we need to use the header, populate it with a copy of the function @@ -2949,7 +2829,6 @@ pub fn genFunc(f: *Function) !void { const main_body = f.air.getMainBody(); try genBodyResolveState(f, undefined, &.{}, main_body, false); - try o.indent_writer.insertNewline(); // Take advantage of the free_locals map to bucket locals per type. All @@ -3007,20 +2886,25 @@ pub fn genDecl(o: *Object) !void { if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return; if (decl.val.getExternFunc(zcu)) |_| { - const fwd_decl_writer = o.dg.fwdDeclWriter(); - try fwd_decl_writer.writeAll("zig_extern "); - try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 }); - try fwd_decl_writer.writeAll(";\n"); - try genExports(o); + const fwd = o.dg.fwdDeclWriter(); + try fwd.writeAll("zig_extern "); + try o.dg.renderFunctionSignature( + fwd, + decl.val, + decl.alignment, + .forward, + .{ .@"export" = .{ + .main_name = decl.name, + .extern_name = decl.name, + } }, + ); + try fwd.writeAll(";\n"); } else if (decl.val.getVariable(zcu)) |variable| { - try o.dg.renderFwdDecl(decl_index, variable, .final); - try genExports(o); + try o.dg.renderFwdDecl(decl_index, variable); if (variable.is_extern) return; - const is_global = variable.is_extern or o.dg.declIsGlobal(decl.val); const w = o.writer(); - if (!is_global) try w.writeAll("static "); if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage "); if (variable.is_threadlocal and !o.dg.mod.single_threaded) try w.writeAll("zig_threadlocal "); if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s| @@ -3032,46 +2916,27 @@ pub fn genDecl(o: *Object) !void { try w.writeByte(';'); try o.indent_writer.insertNewline(); } else { - const is_global = o.dg.zcu.decl_exports.contains(decl_index); const decl_c_value = .{ .decl = decl_index }; - try genDeclValue(o, decl.val, is_global, decl_c_value, decl.alignment, decl.@"linksection"); + try genDeclValue(o, decl.val, decl_c_value, decl.alignment, decl.@"linksection"); } } pub fn genDeclValue( o: *Object, val: Value, - is_global: bool, decl_c_value: CValue, alignment: Alignment, @"linksection": InternPool.OptionalNullTerminatedString, ) !void { const zcu = o.dg.zcu; - const fwd_decl_writer = o.dg.fwdDeclWriter(); - const ty = val.typeOf(zcu); - try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); - try o.dg.renderTypeAndName(fwd_decl_writer, ty, decl_c_value, Const, alignment, .complete); - switch (o.dg.pass) { - .decl => |decl_index| { - if (zcu.decl_exports.get(decl_index)) |exports| { - const export_name = exports.items[0].opts.name.toSlice(&zcu.intern_pool); - if (isMangledIdent(export_name, true)) { - try fwd_decl_writer.print(" zig_mangled_final({ }, {s})", .{ - fmtIdent(export_name), fmtStringLiteral(export_name, null), - }); - } - } - }, - .anon => {}, - .flush => unreachable, - } - try fwd_decl_writer.writeAll(";\n"); - try genExports(o); + const fwd = o.dg.fwdDeclWriter(); + try fwd.writeAll("static "); + try o.dg.renderTypeAndName(fwd, ty, decl_c_value, Const, alignment, .complete); + try fwd.writeAll(";\n"); const w = o.writer(); - if (!is_global) try w.writeAll("static "); if 
(@"linksection".toSlice(&zcu.intern_pool)) |s| try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)}); try o.dg.renderTypeAndName(w, ty, decl_c_value, Const, alignment, .complete); @@ -3080,24 +2945,73 @@ pub fn genDeclValue( try w.writeAll(";\n"); } -pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { - if (true) @panic("TODO jacobly"); - - const tracy = trace(@src()); - defer tracy.end(); - +pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const u32) !void { const zcu = dg.zcu; - const decl_index = dg.pass.decl; - const decl = zcu.declPtr(decl_index); - const writer = dg.fwdDeclWriter(); + const ip = &zcu.intern_pool; + const fwd = dg.fwdDeclWriter(); - switch (decl.typeOf(zcu).zigTypeTag(zcu)) { - .Fn => if (dg.declIsGlobal(decl.val)) { - try writer.writeAll("zig_extern "); - try dg.renderFunctionSignature(writer, dg.pass.decl, .complete, .{ .export_index = 0 }); - try dg.fwd_decl.appendSlice(";\n"); + const main_name = zcu.all_exports.items[export_indices[0]].opts.name; + try fwd.writeAll("#define "); + switch (exported) { + .decl_index => |decl_index| try dg.renderDeclName(fwd, decl_index), + .value => |value| try DeclGen.renderAnonDeclName(fwd, Value.fromInterned(value)), + } + try fwd.writeByte(' '); + try fwd.print("{ }", .{fmtIdent(main_name.toSlice(ip))}); + try fwd.writeByte('\n'); + + const is_const = switch (ip.indexToKey(exported.getValue(zcu).toIntern())) { + .func, .extern_func => return for (export_indices) |export_index| { + const @"export" = &zcu.all_exports.items[export_index]; + try fwd.writeAll("zig_extern "); + if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn "); + try dg.renderFunctionSignature( + fwd, + exported.getValue(zcu), + exported.getAlign(zcu), + .forward, + .{ .@"export" = .{ + .main_name = main_name, + .extern_name = @"export".opts.name, + } }, + ); + try fwd.writeAll(";\n"); }, - else => {}, + .variable => |variable| variable.is_const, + else => true, + }; + for (export_indices) |export_index| { + const @"export" = &zcu.all_exports.items[export_index]; + try fwd.writeAll("zig_extern "); + if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage "); + const extern_name = @"export".opts.name.toSlice(ip); + const is_mangled = isMangledIdent(extern_name, true); + const is_export = @"export".opts.name != main_name; + try dg.renderTypeAndName( + fwd, + exported.getValue(zcu).typeOf(zcu), + .{ .identifier = extern_name }, + CQualifiers.init(.{ .@"const" = is_const }), + exported.getAlign(zcu), + .complete, + ); + if (is_mangled and is_export) { + try fwd.print(" zig_mangled_export({ }, {s}, {s})", .{ + fmtIdent(extern_name), + fmtStringLiteral(extern_name, null), + fmtStringLiteral(main_name.toSlice(ip), null), + }); + } else if (is_mangled) { + try fwd.print(" zig_mangled({ }, {s})", .{ + fmtIdent(extern_name), fmtStringLiteral(extern_name, null), + }); + } else if (is_export) { + try fwd.print(" zig_export({s}, {s})", .{ + fmtStringLiteral(main_name.toSlice(ip), null), + fmtStringLiteral(extern_name, null), + }); + } + try fwd.writeAll(";\n"); } } @@ -4554,7 +4468,7 @@ fn airCall( }; }; switch (modifier) { - .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0), + .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl), inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName( @unionInit(LazyFnKey, @tagName(m), fn_decl), @unionInit(LazyFnValue.Data, @tagName(m), {}), diff --git a/src/link.zig 
b/src/link.zig index 009b38a681..298d81d80c 100644 --- a/src/link.zig +++ b/src/link.zig @@ -679,7 +679,6 @@ pub const File = struct { if (build_options.only_c) @compileError("unreachable"); switch (base.tag) { .plan9, - .c, .spirv, .nvptx, => {}, diff --git a/src/link/C.zig b/src/link/C.zig index 8372029d2d..be8397e196 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -39,6 +39,9 @@ anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, DeclBlock) = .{}, /// the keys of `anon_decls`. aligned_anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .{}, +exported_decls: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, ExportedBlock) = .{}, +exported_values: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .{}, + /// Optimization, `updateDecl` reuses this buffer rather than creating a new /// one with every call. fwd_decl_buf: std.ArrayListUnmanaged(u8) = .{}, @@ -80,6 +83,11 @@ pub const DeclBlock = struct { } }; +/// Per-exported-symbol data. +pub const ExportedBlock = struct { + fwd_decl: String = String.empty, +}; + pub fn getString(this: C, s: String) []const u8 { return this.string_bytes.items[s.start..][0..s.len]; } @@ -183,8 +191,6 @@ pub fn updateFunc( air: Air, liveness: Liveness, ) !void { - if (true) @panic("TODO jacobly"); - const gpa = self.base.comp.gpa; const func = zcu.funcInfo(func_index); @@ -240,9 +246,13 @@ pub fn updateFunc( function.deinit(); } + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); codegen.genFunc(&function) catch |err| switch (err) { error.AnalysisFail => { - try zcu.failed_decls.put(gpa, decl_index, function.object.dg.error_msg.?); + zcu.failed_analysis.putAssumeCapacityNoClobber( + InternPool.AnalUnit.wrap(.{ .decl = decl_index }), + function.object.dg.error_msg.?, + ); return; }, else => |e| return e, @@ -252,8 +262,6 @@ pub fn updateFunc( } fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { - if (true) @panic("TODO jacobly"); - const gpa = self.base.comp.gpa; const anon_decl = self.anon_decls.keys()[i]; @@ -292,7 +300,7 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { const c_value: codegen.CValue = .{ .constant = Value.fromInterned(anon_decl) }; const alignment: Alignment = self.aligned_anon_decls.get(anon_decl) orelse .none; - codegen.genDeclValue(&object, c_value.constant, false, c_value, alignment, .none) catch |err| switch (err) { + codegen.genDeclValue(&object, c_value.constant, c_value, alignment, .none) catch |err| switch (err) { error.AnalysisFail => { @panic("TODO: C backend AnalysisFail on anonymous decl"); //try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?); @@ -310,8 +318,6 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { } pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { - if (true) @panic("TODO jacobly"); - const tracy = trace(@src()); defer tracy.end(); @@ -357,9 +363,13 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { code.* = object.code.moveToUnmanaged(); } + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); codegen.genDecl(&object) catch |err| switch (err) { error.AnalysisFail => { - try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?); + zcu.failed_analysis.putAssumeCapacityNoClobber( + InternPool.AnalUnit.wrap(.{ .decl = decl_index }), + object.dg.error_msg.?, + ); return; }, else => |e| return e, @@ -396,8 +406,6 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) { } pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) 
!void { - if (true) @panic("TODO jacobly"); - _ = arena; // Has the same lifetime as the call to Compilation.update. const tracy = trace(@src()); @@ -460,26 +468,39 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; defer export_names.deinit(gpa); try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count())); - for (zcu.single_exports.values()) |export_idx| { - export_names.putAssumeCapacity(gpa, zcu.all_exports.items[export_idx].opts.name, {}); + for (zcu.single_exports.values()) |export_index| { + export_names.putAssumeCapacity(zcu.all_exports.items[export_index].opts.name, {}); } for (zcu.multi_exports.values()) |info| { - try export_names.ensureUnusedCapacity(info.len); - for (zcu.all_exports.items[info.index..][0..info.len]) |export_idx| { - export_names.putAssumeCapacity(gpa, zcu.all_exports.items[export_idx].opts.name, {}); + try export_names.ensureUnusedCapacity(gpa, info.len); + for (zcu.all_exports.items[info.index..][0..info.len]) |@"export"| { + export_names.putAssumeCapacity(@"export".opts.name, {}); } } - for (self.anon_decls.values()) |*decl_block| { - try self.flushDeclBlock(zcu, zcu.root_mod, &f, decl_block, export_names, .none); - } + for (self.anon_decls.keys(), self.anon_decls.values()) |value, *decl_block| try self.flushDeclBlock( + zcu, + zcu.root_mod, + &f, + decl_block, + self.exported_values.getPtr(value), + export_names, + .none, + ); for (self.decl_table.keys(), self.decl_table.values()) |decl_index, *decl_block| { const decl = zcu.declPtr(decl_index); - assert(decl.has_tv); - const extern_symbol_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none; + const extern_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none; const mod = zcu.namespacePtr(decl.src_namespace).file_scope.mod; - try self.flushDeclBlock(zcu, mod, &f, decl_block, export_names, extern_symbol_name); + try self.flushDeclBlock( + zcu, + mod, + &f, + decl_block, + self.exported_decls.getPtr(decl_index), + export_names, + extern_name, + ); } } @@ -512,12 +533,16 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo f.file_size += lazy_fwd_decl_len; // Now the code. 
- const anon_decl_values = self.anon_decls.values(); - const decl_values = self.decl_table.values(); - try f.all_buffers.ensureUnusedCapacity(gpa, 1 + anon_decl_values.len + decl_values.len); + try f.all_buffers.ensureUnusedCapacity(gpa, 1 + (self.anon_decls.count() + self.decl_table.count()) * 2); f.appendBufAssumeCapacity(self.lazy_code_buf.items); - for (anon_decl_values) |db| f.appendBufAssumeCapacity(self.getString(db.code)); - for (decl_values) |db| f.appendBufAssumeCapacity(self.getString(db.code)); + for (self.anon_decls.keys(), self.anon_decls.values()) |anon_decl, decl_block| f.appendCodeAssumeCapacity( + self.exported_values.contains(anon_decl), + self.getString(decl_block.code), + ); + for (self.decl_table.keys(), self.decl_table.values()) |decl_index, decl_block| f.appendCodeAssumeCapacity( + self.exported_decls.contains(decl_index), + self.getString(decl_block.code), + ); const file = self.base.file.?; try file.setEndPos(f.file_size); @@ -547,6 +572,12 @@ const Flush = struct { f.file_size += buf.len; } + fn appendCodeAssumeCapacity(f: *Flush, is_extern: bool, code: []const u8) void { + if (code.len == 0) return; + f.appendBufAssumeCapacity(if (is_extern) "\nzig_extern " else "\nstatic "); + f.appendBufAssumeCapacity(code); + } + fn deinit(f: *Flush, gpa: Allocator) void { f.all_buffers.deinit(gpa); f.asm_buf.deinit(gpa); @@ -734,19 +765,20 @@ fn flushDeclBlock( zcu: *Zcu, mod: *Module, f: *Flush, - decl_block: *DeclBlock, + decl_block: *const DeclBlock, + exported_block: ?*const ExportedBlock, export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), - extern_symbol_name: InternPool.OptionalNullTerminatedString, + extern_name: InternPool.OptionalNullTerminatedString, ) FlushDeclError!void { const gpa = self.base.comp.gpa; try self.flushLazyFns(zcu, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); - fwd_decl: { - if (extern_symbol_name.unwrap()) |name| { - if (export_names.contains(name)) break :fwd_decl; - } - f.appendBufAssumeCapacity(self.getString(decl_block.fwd_decl)); - } + // avoid emitting extern decls that are already exported + if (extern_name.unwrap()) |name| if (export_names.contains(name)) return; + f.appendBufAssumeCapacity(self.getString(if (exported_block) |exported| + exported.fwd_decl + else + decl_block.fwd_decl)); } pub fn flushEmitH(zcu: *Zcu) !void { @@ -798,8 +830,56 @@ pub fn updateExports( exported: Zcu.Exported, export_indices: []const u32, ) !void { - _ = self; - _ = zcu; - _ = exported; - _ = export_indices; + const gpa = self.base.comp.gpa; + const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) { + .decl_index => |decl_index| .{ + zcu.namespacePtr(zcu.declPtr(decl_index).src_namespace).file_scope.mod, + .{ .decl = decl_index }, + self.decl_table.getPtr(decl_index).?, + (try self.exported_decls.getOrPut(gpa, decl_index)).value_ptr, + }, + .value => |value| .{ + zcu.root_mod, + .{ .anon = value }, + self.anon_decls.getPtr(value).?, + (try self.exported_values.getOrPut(gpa, value)).value_ptr, + }, + }; + const ctype_pool = &decl_block.ctype_pool; + const fwd_decl = &self.fwd_decl_buf; + fwd_decl.clearRetainingCapacity(); + var dg: codegen.DeclGen = .{ + .gpa = gpa, + .zcu = zcu, + .mod = mod, + .error_msg = null, + .pass = pass, + .is_naked_fn = false, + .fwd_decl = fwd_decl.toManaged(gpa), + .ctype_pool = decl_block.ctype_pool, + .scratch = .{}, + .anon_decl_deps = .{}, + .aligned_anon_decls = .{}, + }; + defer { + 
assert(dg.anon_decl_deps.count() == 0);
+        assert(dg.aligned_anon_decls.count() == 0);
+        fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
+        ctype_pool.* = dg.ctype_pool.move();
+        ctype_pool.freeUnusedCapacity(gpa);
+        dg.scratch.deinit(gpa);
+    }
+    try codegen.genExports(&dg, exported, export_indices);
+    exported_block.* = .{ .fwd_decl = try self.addString(dg.fwd_decl.items) };
+}
+
+pub fn deleteExport(
+    self: *C,
+    exported: Zcu.Exported,
+    _: InternPool.NullTerminatedString,
+) void {
+    switch (exported) {
+        .decl_index => |decl_index| _ = self.exported_decls.swapRemove(decl_index),
+        .value => |value| _ = self.exported_values.swapRemove(value),
+    }
 }
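The flushModule loop above walks zcu.single_exports and zcu.multi_exports to collect
export names; both index into the flat zcu.all_exports array. Below is a toy model of
that storage scheme, with invented names (ExportTable, setExports) and simplified
element types standing in for the real Zcu structures:

    const std = @import("std");

    // Simplified stand-ins: the real table stores Zcu.Export values keyed
    // by InternPool.AnalUnit.
    const Export = struct { name: []const u8 };
    const AnalUnit = u32;

    const ExportTable = struct {
        /// Every export performed in the zcu, in analysis order.
        all_exports: std.ArrayListUnmanaged(Export) = .{},
        /// Units that performed exactly one export: an index into `all_exports`.
        single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
        /// Units that performed several: a contiguous run in `all_exports`.
        multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, struct { index: u32, len: u32 }) = .{},

        /// Record the exports one unit performed during its analysis.
        fn setExports(t: *ExportTable, gpa: std.mem.Allocator, unit: AnalUnit, exports: []const Export) !void {
            const first: u32 = @intCast(t.all_exports.items.len);
            try t.all_exports.appendSlice(gpa, exports);
            switch (exports.len) {
                0 => {},
                1 => try t.single_exports.put(gpa, unit, first),
                else => try t.multi_exports.put(gpa, unit, .{
                    .index = first,
                    .len = @intCast(exports.len),
                }),
            }
        }

        fn deinit(t: *ExportTable, gpa: std.mem.Allocator) void {
            t.all_exports.deinit(gpa);
            t.single_exports.deinit(gpa);
            t.multi_exports.deinit(gpa);
        }
    };

    test ExportTable {
        const gpa = std.testing.allocator;
        var table: ExportTable = .{};
        defer table.deinit(gpa);
        try table.setExports(gpa, 1, &.{.{ .name = "foo" }});
        try table.setExports(gpa, 2, &.{ .{ .name = "bar" }, .{ .name = "baz" } });
        const run = table.multi_exports.get(2).?;
        try std.testing.expectEqualStrings("bar", table.all_exports.items[run.index].name);
    }

Splitting out the one-export case keeps the common path at a single u32 per unit, while
multi-export units pay only for an (index, len) pair over the same flat array.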
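The new genExports in src/codegen/c.zig emits, per exported entity, a #define aliasing
the internal decl name to the first (main) export, then one zig_extern forward
declaration per export name; secondary names get zig_export, and names that are not
valid C identifiers get zig_mangled or zig_mangled_export instead (note the rename from
zig_mangled_final). A minimal sketch of the unmangled shape follows; writeExports is a
made-up helper, and a fixed `int` stands in for the actually rendered type:

    const std = @import("std");

    // Hypothetical helper: the real backend renders the decl's actual type,
    // qualifiers, and alignment; `int` here is just a placeholder.
    fn writeExports(w: anytype, internal_name: []const u8, export_names: []const []const u8) !void {
        const main_name = export_names[0];
        // Redirect uses of the internal symbol to the main export's name.
        try w.print("#define {s} {s}\n", .{ internal_name, main_name });
        for (export_names) |name| {
            try w.print("zig_extern int {s}", .{name});
            // Secondary export names alias the main symbol.
            if (!std.mem.eql(u8, name, main_name))
                try w.print(" zig_export(\"{s}\", \"{s}\")", .{ main_name, name });
            try w.writeAll(";\n");
        }
    }

    test writeExports {
        var buf = std.ArrayList(u8).init(std.testing.allocator);
        defer buf.deinit();
        try writeExports(buf.writer(), "main_Foo_x", &.{ "foo", "bar" });
        try std.testing.expectEqualStrings(
            \\#define main_Foo_x foo
            \\zig_extern int foo;
            \\zig_extern int bar zig_export("foo", "bar");
            \\
        , buf.items);
    }

The #define is what lets the rest of the generated C keep referring to the decl by its
internal name regardless of how many export names it carries.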
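Two of the flush-time rules from the link/C.zig changes, reduced to sketches
(skipExternFwdDecl and linkagePrefix are invented names; the real code tests interned
NullTerminatedStrings and writes through appendCodeAssumeCapacity):

    const std = @import("std");

    /// Mirrors the check in flushDeclBlock: an extern decl's forward
    /// declaration is dropped when an export already claims the same symbol
    /// name, since the export's own forward declaration (built in
    /// updateExports) supersedes it.
    fn skipExternFwdDecl(
        extern_name: ?[]const u8,
        export_names: *const std.StringArrayHashMapUnmanaged(void),
    ) bool {
        const name = extern_name orelse return false; // not extern: emit normally
        return export_names.contains(name);
    }

    /// Mirrors appendCodeAssumeCapacity: the linkage prefix for a cached
    /// code block is chosen at flush time from the decl's current export
    /// status.
    fn linkagePrefix(is_exported: bool) []const u8 {
        return if (is_exported) "\nzig_extern " else "\nstatic ";
    }

    test skipExternFwdDecl {
        const gpa = std.testing.allocator;
        var export_names: std.StringArrayHashMapUnmanaged(void) = .{};
        defer export_names.deinit(gpa);
        try export_names.put(gpa, "foo", {});
        try std.testing.expect(skipExternFwdDecl("foo", &export_names));
        try std.testing.expect(!skipExternFwdDecl("bar", &export_names));
        try std.testing.expectEqualStrings("\nstatic ", linkagePrefix(false));
    }

Choosing the static/zig_extern prefix at flush time is what lets updateExports and
deleteExport toggle a decl's export status by regenerating only the small ExportedBlock
forward declaration, leaving the cached code block untouched.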