Diffstat (limited to 'src-self-hosted/Module.zig')
| -rw-r--r-- | src-self-hosted/Module.zig | 1907 |
1 file changed, 1452 insertions, 455 deletions
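Context for the diff below: root_scope changes from a concrete *Scope.ZIRModule to a polymorphic *Scope that is either a Scope.File (.zig source, parsed to an AST and lowered to ZIR) or a Scope.ZIRModule (.zir source). Throughout the new code, each concrete scope embeds a base: Scope field carrying a tag, and the concrete type is recovered from the base pointer with @fieldParentPtr. The following standalone sketch (not part of the patch; the names and the main function are illustrative only) reduces that pattern to its essentials:

const std = @import("std");

// Illustrative reduction of the Scope pattern used in the diff: every concrete
// scope embeds a `base: Scope` whose tag identifies the concrete type, and
// cast() recovers the outer struct from the base pointer.
const Scope = struct {
    tag: Tag,

    pub const Tag = enum {
        file,
        zir_module,
    };

    pub fn cast(base: *Scope, comptime T: type) ?*T {
        if (base.tag != T.base_tag) return null;
        return @fieldParentPtr(T, "base", base);
    }

    pub const File = struct {
        pub const base_tag: Tag = .file;
        base: Scope = Scope{ .tag = base_tag },
        sub_file_path: []const u8,
    };
};

pub fn main() void {
    var file = Scope.File{ .sub_file_path = "start.zig" };
    const base: *Scope = &file.base;
    // Dispatch on the tag and recover the concrete scope.
    if (base.cast(Scope.File)) |scope_file| {
        std.debug.warn("file scope: {}\n", .{scope_file.sub_file_path});
    }
}

The same base/tag layout is what lets Decl.scope, failed_files, and the namespace helpers below accept a plain *Scope and switch on scope.tag instead of hard-coding Scope.ZIRModule.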
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index 4bcc30a65e..89dcac3f41 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -15,13 +15,16 @@ const ir = @import("ir.zig"); const zir = @import("zir.zig"); const Module = @This(); const Inst = ir.Inst; +const ast = std.zig.ast; +const trace = @import("tracy.zig").trace; /// General-purpose allocator. allocator: *Allocator, /// Pointer to externally managed resource. root_pkg: *Package, /// Module owns this resource. -root_scope: *Scope.ZIRModule, +/// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`. +root_scope: *Scope, bin_file: link.ElfFile, bin_file_dir: std.fs.Dir, bin_file_path: []const u8, @@ -35,10 +38,10 @@ decl_exports: std.AutoHashMap(*Decl, []*Export), /// This table owns the Export memory. export_owners: std.AutoHashMap(*Decl, []*Export), /// Maps fully qualified namespaced names to the Decl struct for them. -decl_table: std.AutoHashMap(Decl.Hash, *Decl), +decl_table: DeclTable, optimize_mode: std.builtin.Mode, -link_error_flags: link.ElfFile.ErrorFlags = link.ElfFile.ErrorFlags{}, +link_error_flags: link.ElfFile.ErrorFlags = .{}, work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic), @@ -49,8 +52,8 @@ work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic), /// a Decl can have a failed_decls entry but have analysis status of success. failed_decls: std.AutoHashMap(*Decl, *ErrorMsg), /// Using a map here for consistency with the other fields here. -/// The ErrorMsg memory is owned by the `Scope.ZIRModule`, using Module's allocator. -failed_files: std.AutoHashMap(*Scope.ZIRModule, *ErrorMsg), +/// The ErrorMsg memory is owned by the `Scope`, using Module's allocator. +failed_files: std.AutoHashMap(*Scope, *ErrorMsg), /// Using a map here for consistency with the other fields here. /// The ErrorMsg memory is owned by the `Export`, using Module's allocator. failed_exports: std.AutoHashMap(*Export, *ErrorMsg), @@ -60,15 +63,23 @@ failed_exports: std.AutoHashMap(*Export, *ErrorMsg), /// previous analysis. generation: u32 = 0, +next_anon_name_index: usize = 0, + /// Candidates for deletion. After a semantic analysis update completes, this list /// contains Decls that need to be deleted if they end up having no references to them. -deletion_set: std.ArrayListUnmanaged(*Decl) = std.ArrayListUnmanaged(*Decl){}, +deletion_set: std.ArrayListUnmanaged(*Decl) = .{}, + +keep_source_files_loaded: bool, + +const DeclTable = std.HashMap(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql); -pub const WorkItem = union(enum) { +const WorkItem = union(enum) { /// Write the machine code for a Decl to the output file. codegen_decl: *Decl, - /// Decl has been determined to be outdated; perform semantic analysis again. - re_analyze_decl: *Decl, + /// The Decl needs to be analyzed and possibly export itself. + /// It may have already be analyzed, or it may have been determined + /// to be outdated; in this case perform semantic analysis again. + analyze_decl: *Decl, }; pub const Export = struct { @@ -99,13 +110,12 @@ pub const Decl = struct { /// mapping them to an address in the output file. /// Memory owned by this decl, using Module's allocator. name: [*:0]const u8, - /// The direct parent container of the Decl. This field will need to get more fleshed out when - /// self-hosted supports proper struct types and Zig AST => ZIR. + /// The direct parent container of the Decl. This is either a `Scope.File` or `Scope.ZIRModule`. /// Reference to externally owned memory. 
- scope: *Scope.ZIRModule, - /// Byte offset into the source file that contains this declaration. - /// This is the base offset that src offsets within this Decl are relative to. - src: usize, + scope: *Scope, + /// The AST Node decl index or ZIR Inst index that contains this declaration. + /// Must be recomputed when the corresponding source file is modified. + src_index: usize, /// The most recent value of the Decl after a successful semantic analysis. typed_value: union(enum) { never_succeeded: void, @@ -116,6 +126,9 @@ pub const Decl = struct { /// analysis of the function body is performed with this value set to `success`. Functions /// have their own analysis status field. analysis: enum { + /// This Decl corresponds to an AST Node that has not been referenced yet, and therefore + /// because of Zig's lazy declaration analysis, it will remain unanalyzed until referenced. + unreferenced, /// Semantic analysis for this Decl is running right now. This state detects dependency loops. in_progress, /// This Decl might be OK but it depends on another one which did not successfully complete @@ -125,6 +138,10 @@ pub const Decl = struct { /// There will be a corresponding ErrorMsg in Module.failed_decls. sema_failure, /// There will be a corresponding ErrorMsg in Module.failed_decls. + /// This indicates the failure was something like running out of disk space, + /// and attempting semantic analysis again may succeed. + sema_failure_retryable, + /// There will be a corresponding ErrorMsg in Module.failed_decls. codegen_failure, /// There will be a corresponding ErrorMsg in Module.failed_decls. /// This indicates the failure was something like running out of disk space, @@ -150,7 +167,7 @@ pub const Decl = struct { /// This is populated regardless of semantic analysis and code generation. link: link.ElfFile.TextBlock = link.ElfFile.TextBlock.empty, - contents_hash: Hash, + contents_hash: std.zig.SrcHash, /// The shallow set of other decls whose typed_value could possibly change if this Decl's /// typed_value is modified. @@ -169,28 +186,28 @@ pub const Decl = struct { allocator.destroy(self); } - pub const Hash = [16]u8; - - /// If the name is small enough, it is used directly as the hash. - /// If it is long, blake3 hash is computed. - pub fn hashSimpleName(name: []const u8) Hash { - var out: Hash = undefined; - if (name.len <= Hash.len) { - mem.copy(u8, &out, name); - mem.set(u8, out[name.len..], 0); - } else { - std.crypto.Blake3.hash(name, &out); + pub fn src(self: Decl) usize { + switch (self.scope.tag) { + .file => { + const file = @fieldParentPtr(Scope.File, "base", self.scope); + const tree = file.contents.tree; + const decl_node = tree.root_node.decls()[self.src_index]; + return tree.token_locs[decl_node.firstToken()].start; + }, + .zir_module => { + const zir_module = @fieldParentPtr(Scope.ZIRModule, "base", self.scope); + const module = zir_module.contents.module; + const src_decl = module.decls[self.src_index]; + return src_decl.inst.src; + }, + .block => unreachable, + .gen_zir => unreachable, + .decl => unreachable, } - return out; } - /// Must generate unique bytes with no collisions with other decls. - /// The point of hashing here is only to limit the number of bytes of - /// the unique identifier to a fixed size (16 bytes). - pub fn fullyQualifiedNameHash(self: Decl) Hash { - // Right now we only have ZIRModule as the source. So this is simply the - // relative name of the decl. 
- return hashSimpleName(mem.spanZ(self.name)); + pub fn fullyQualifiedNameHash(self: Decl) Scope.NameHash { + return self.scope.fullyQualifiedNameHash(mem.spanZ(self.name)); } pub fn typedValue(self: *Decl) error{AnalysisFail}!TypedValue { @@ -248,11 +265,9 @@ pub const Decl = struct { /// Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator. pub const Fn = struct { /// This memory owned by the Decl's TypedValue.Managed arena allocator. - fn_type: Type, analysis: union(enum) { - /// The value is the source instruction. - queued: *zir.Inst.Fn, - in_progress: *Analysis, + queued: *ZIR, + in_progress, /// There will be a corresponding ErrorMsg in Module.failed_decls sema_failure, /// This Fn might be OK but it depends on another Decl which did not successfully complete @@ -266,16 +281,20 @@ pub const Fn = struct { /// of Fn analysis. pub const Analysis = struct { inner_block: Scope.Block, - /// TODO Performance optimization idea: instead of this inst_table, - /// use a field in the zir.Inst instead to track corresponding instructions - inst_table: std.AutoHashMap(*zir.Inst, *Inst), - needed_inst_capacity: usize, + }; + + /// Contains un-analyzed ZIR instructions generated from Zig source AST. + pub const ZIR = struct { + body: zir.Module.Body, + arena: std.heap.ArenaAllocator.State, }; }; pub const Scope = struct { tag: Tag, + pub const NameHash = [16]u8; + pub fn cast(base: *Scope, comptime T: type) ?*T { if (base.tag != T.base_tag) return null; @@ -289,7 +308,9 @@ pub const Scope = struct { switch (self.tag) { .block => return self.cast(Block).?.arena, .decl => return &self.cast(DeclAnalysis).?.arena.allocator, + .gen_zir => return &self.cast(GenZIR).?.arena.allocator, .zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator, + .file => unreachable, } } @@ -298,18 +319,45 @@ pub const Scope = struct { pub fn decl(self: *Scope) ?*Decl { return switch (self.tag) { .block => self.cast(Block).?.decl, + .gen_zir => self.cast(GenZIR).?.decl, .decl => self.cast(DeclAnalysis).?.decl, .zir_module => null, + .file => null, }; } - /// Asserts the scope has a parent which is a ZIRModule and + /// Asserts the scope has a parent which is a ZIRModule or File and /// returns it. - pub fn namespace(self: *Scope) *ZIRModule { + pub fn namespace(self: *Scope) *Scope { switch (self.tag) { .block => return self.cast(Block).?.decl.scope, + .gen_zir => return self.cast(GenZIR).?.decl.scope, .decl => return self.cast(DeclAnalysis).?.decl.scope, - .zir_module => return self.cast(ZIRModule).?, + .zir_module, .file => return self, + } + } + + /// Must generate unique bytes with no collisions with other decls. + /// The point of hashing here is only to limit the number of bytes of + /// the unique identifier to a fixed size (16 bytes). + pub fn fullyQualifiedNameHash(self: *Scope, name: []const u8) NameHash { + switch (self.tag) { + .block => unreachable, + .gen_zir => unreachable, + .decl => unreachable, + .zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name), + .file => return self.cast(File).?.fullyQualifiedNameHash(name), + } + } + + /// Asserts the scope is a child of a File and has an AST tree and returns the tree. 
+ pub fn tree(self: *Scope) *ast.Tree { + switch (self.tag) { + .file => return self.cast(File).?.contents.tree, + .zir_module => unreachable, + .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(File).?.contents.tree, + .block => return self.cast(Block).?.decl.scope.cast(File).?.contents.tree, + .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(File).?.contents.tree, } } @@ -325,10 +373,173 @@ pub const Scope = struct { }); } + /// Asserts the scope has a parent which is a ZIRModule or File and + /// returns the sub_file_path field. + pub fn subFilePath(base: *Scope) []const u8 { + switch (base.tag) { + .file => return @fieldParentPtr(File, "base", base).sub_file_path, + .zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path, + .block => unreachable, + .gen_zir => unreachable, + .decl => unreachable, + } + } + + pub fn unload(base: *Scope, allocator: *Allocator) void { + switch (base.tag) { + .file => return @fieldParentPtr(File, "base", base).unload(allocator), + .zir_module => return @fieldParentPtr(ZIRModule, "base", base).unload(allocator), + .block => unreachable, + .gen_zir => unreachable, + .decl => unreachable, + } + } + + pub fn getSource(base: *Scope, module: *Module) ![:0]const u8 { + switch (base.tag) { + .file => return @fieldParentPtr(File, "base", base).getSource(module), + .zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module), + .gen_zir => unreachable, + .block => unreachable, + .decl => unreachable, + } + } + + /// Asserts the scope is a namespace Scope and removes the Decl from the namespace. + pub fn removeDecl(base: *Scope, child: *Decl) void { + switch (base.tag) { + .file => return @fieldParentPtr(File, "base", base).removeDecl(child), + .zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child), + .block => unreachable, + .gen_zir => unreachable, + .decl => unreachable, + } + } + + /// Asserts the scope is a File or ZIRModule and deinitializes it, then deallocates it. + pub fn destroy(base: *Scope, allocator: *Allocator) void { + switch (base.tag) { + .file => { + const scope_file = @fieldParentPtr(File, "base", base); + scope_file.deinit(allocator); + allocator.destroy(scope_file); + }, + .zir_module => { + const scope_zir_module = @fieldParentPtr(ZIRModule, "base", base); + scope_zir_module.deinit(allocator); + allocator.destroy(scope_zir_module); + }, + .block => unreachable, + .gen_zir => unreachable, + .decl => unreachable, + } + } + + fn name_hash_hash(x: NameHash) u32 { + return @truncate(u32, @bitCast(u128, x)); + } + + fn name_hash_eql(a: NameHash, b: NameHash) bool { + return @bitCast(u128, a) == @bitCast(u128, b); + } + pub const Tag = enum { + /// .zir source code. zir_module, + /// .zig source code. + file, block, decl, + gen_zir, + }; + + pub const File = struct { + pub const base_tag: Tag = .file; + base: Scope = Scope{ .tag = base_tag }, + + /// Relative to the owning package's root_src_dir. + /// Reference to external memory, not owned by File. + sub_file_path: []const u8, + source: union(enum) { + unloaded: void, + bytes: [:0]const u8, + }, + contents: union { + not_available: void, + tree: *ast.Tree, + }, + status: enum { + never_loaded, + unloaded_success, + unloaded_parse_failure, + loaded_success, + }, + + /// Direct children of the file. 
+ decls: ArrayListUnmanaged(*Decl), + + pub fn unload(self: *File, allocator: *Allocator) void { + switch (self.status) { + .never_loaded, + .unloaded_parse_failure, + .unloaded_success, + => {}, + + .loaded_success => { + self.contents.tree.deinit(); + self.status = .unloaded_success; + }, + } + switch (self.source) { + .bytes => |bytes| { + allocator.free(bytes); + self.source = .{ .unloaded = {} }; + }, + .unloaded => {}, + } + } + + pub fn deinit(self: *File, allocator: *Allocator) void { + self.decls.deinit(allocator); + self.unload(allocator); + self.* = undefined; + } + + pub fn removeDecl(self: *File, child: *Decl) void { + for (self.decls.items) |item, i| { + if (item == child) { + _ = self.decls.swapRemove(i); + return; + } + } + } + + pub fn dumpSrc(self: *File, src: usize) void { + const loc = std.zig.findLineColumn(self.source.bytes, src); + std.debug.warn("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 }); + } + + pub fn getSource(self: *File, module: *Module) ![:0]const u8 { + switch (self.source) { + .unloaded => { + const source = try module.root_pkg.root_src_dir.readFileAllocOptions( + module.allocator, + self.sub_file_path, + std.math.maxInt(u32), + 1, + 0, + ); + self.source = .{ .bytes = source }; + return source; + }, + .bytes => |bytes| return bytes, + } + } + + pub fn fullyQualifiedNameHash(self: *File, name: []const u8) NameHash { + // We don't have struct scopes yet so this is currently just a simple name hash. + return std.zig.hashSrc(name); + } }; pub const ZIRModule = struct { @@ -355,6 +566,11 @@ pub const Scope = struct { loaded_success, }, + /// Even though .zir files only have 1 module, this set is still needed + /// because of anonymous Decls, which can exist in the global set, but + /// not this one. + decls: ArrayListUnmanaged(*Decl), + pub fn unload(self: *ZIRModule, allocator: *Allocator) void { switch (self.status) { .never_loaded, @@ -366,11 +582,13 @@ pub const Scope = struct { .loaded_success => { self.contents.module.deinit(allocator); allocator.destroy(self.contents.module); + self.contents = .{ .not_available = {} }; self.status = .unloaded_success; }, .loaded_sema_failure => { self.contents.module.deinit(allocator); allocator.destroy(self.contents.module); + self.contents = .{ .not_available = {} }; self.status = .unloaded_sema_failure; }, } @@ -384,14 +602,46 @@ pub const Scope = struct { } pub fn deinit(self: *ZIRModule, allocator: *Allocator) void { + self.decls.deinit(allocator); self.unload(allocator); self.* = undefined; } + pub fn removeDecl(self: *ZIRModule, child: *Decl) void { + for (self.decls.items) |item, i| { + if (item == child) { + _ = self.decls.swapRemove(i); + return; + } + } + } + pub fn dumpSrc(self: *ZIRModule, src: usize) void { const loc = std.zig.findLineColumn(self.source.bytes, src); std.debug.warn("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 }); } + + pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 { + switch (self.source) { + .unloaded => { + const source = try module.root_pkg.root_src_dir.readFileAllocOptions( + module.allocator, + self.sub_file_path, + std.math.maxInt(u32), + 1, + 0, + ); + self.source = .{ .bytes = source }; + return source; + }, + .bytes => |bytes| return bytes, + } + } + + pub fn fullyQualifiedNameHash(self: *ZIRModule, name: []const u8) NameHash { + // ZIR modules only have 1 file with all decls global in the same namespace. 
+ return std.zig.hashSrc(name); + } }; /// This is a temporary structure, references to it are valid only @@ -399,7 +649,7 @@ pub const Scope = struct { pub const Block = struct { pub const base_tag: Tag = .block; base: Scope = Scope{ .tag = base_tag }, - func: *Fn, + func: ?*Fn, decl: *Decl, instructions: ArrayListUnmanaged(*Inst), /// Points to the arena allocator of DeclAnalysis @@ -414,6 +664,16 @@ pub const Scope = struct { decl: *Decl, arena: std.heap.ArenaAllocator, }; + + /// This is a temporary structure, references to it are valid only + /// during semantic analysis of the decl. + pub const GenZIR = struct { + pub const base_tag: Tag = .gen_zir; + base: Scope = Scope{ .tag = base_tag }, + decl: *Decl, + arena: std.heap.ArenaAllocator, + instructions: std.ArrayList(*zir.Inst), + }; }; pub const Body = struct { @@ -463,19 +723,10 @@ pub const InitOptions = struct { link_mode: ?std.builtin.LinkMode = null, object_format: ?std.builtin.ObjectFormat = null, optimize_mode: std.builtin.Mode = .Debug, + keep_source_files_loaded: bool = false, }; pub fn init(gpa: *Allocator, options: InitOptions) !Module { - const root_scope = try gpa.create(Scope.ZIRModule); - errdefer gpa.destroy(root_scope); - - root_scope.* = .{ - .sub_file_path = options.root_pkg.root_src_path, - .source = .{ .unloaded = {} }, - .contents = .{ .not_available = {} }, - .status = .never_loaded, - }; - const bin_file_dir = options.bin_file_dir orelse std.fs.cwd(); var bin_file = try link.openBinFilePath(gpa, bin_file_dir, options.bin_file_path, .{ .target = options.target, @@ -485,6 +736,32 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module { }); errdefer bin_file.deinit(); + const root_scope = blk: { + if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zig")) { + const root_scope = try gpa.create(Scope.File); + root_scope.* = .{ + .sub_file_path = options.root_pkg.root_src_path, + .source = .{ .unloaded = {} }, + .contents = .{ .not_available = {} }, + .status = .never_loaded, + .decls = .{}, + }; + break :blk &root_scope.base; + } else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) { + const root_scope = try gpa.create(Scope.ZIRModule); + root_scope.* = .{ + .sub_file_path = options.root_pkg.root_src_path, + .source = .{ .unloaded = {} }, + .contents = .{ .not_available = {} }, + .status = .never_loaded, + .decls = .{}, + }; + break :blk &root_scope.base; + } else { + unreachable; + } + }; + return Module{ .allocator = gpa, .root_pkg = options.root_pkg, @@ -493,13 +770,14 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module { .bin_file_path = options.bin_file_path, .bin_file = bin_file, .optimize_mode = options.optimize_mode, - .decl_table = std.AutoHashMap(Decl.Hash, *Decl).init(gpa), + .decl_table = DeclTable.init(gpa), .decl_exports = std.AutoHashMap(*Decl, []*Export).init(gpa), .export_owners = std.AutoHashMap(*Decl, []*Export).init(gpa), .failed_decls = std.AutoHashMap(*Decl, *ErrorMsg).init(gpa), - .failed_files = std.AutoHashMap(*Scope.ZIRModule, *ErrorMsg).init(gpa), + .failed_files = std.AutoHashMap(*Scope, *ErrorMsg).init(gpa), .failed_exports = std.AutoHashMap(*Export, *ErrorMsg).init(gpa), .work_queue = std.fifo.LinearFifo(WorkItem, .Dynamic).init(gpa), + .keep_source_files_loaded = options.keep_source_files_loaded, }; } @@ -551,10 +829,7 @@ pub fn deinit(self: *Module) void { } self.export_owners.deinit(); } - { - self.root_scope.deinit(allocator); - allocator.destroy(self.root_scope); - } + self.root_scope.destroy(allocator); self.* = undefined; } @@ -571,19 +846,31 
@@ pub fn target(self: Module) std.Target { /// Detect changes to source files, perform semantic analysis, and update the output files. pub fn update(self: *Module) !void { + const tracy = trace(@src()); + defer tracy.end(); + self.generation += 1; // TODO Use the cache hash file system to detect which source files changed. - // Here we simulate a full cache miss. - // Analyze the root source file now. - // Source files could have been loaded for any reason; to force a refresh we unload now. - self.root_scope.unload(self.allocator); - self.analyzeRoot(self.root_scope) catch |err| switch (err) { - error.AnalysisFail => { - assert(self.totalErrorCount() != 0); - }, - else => |e| return e, - }; + // Until then we simulate a full cache miss. Source files could have been loaded for any reason; + // to force a refresh we unload now. + if (self.root_scope.cast(Scope.File)) |zig_file| { + zig_file.unload(self.allocator); + self.analyzeRootSrcFile(zig_file) catch |err| switch (err) { + error.AnalysisFail => { + assert(self.totalErrorCount() != 0); + }, + else => |e| return e, + }; + } else if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| { + zir_module.unload(self.allocator); + self.analyzeRootZIRModule(zir_module) catch |err| switch (err) { + error.AnalysisFail => { + assert(self.totalErrorCount() != 0); + }, + else => |e| return e, + }; + } try self.performAllTheWork(); @@ -596,14 +883,16 @@ pub fn update(self: *Module) !void { try self.deleteDecl(decl); } + self.link_error_flags = self.bin_file.error_flags; + // If there are any errors, we anticipate the source files being loaded // to report error messages. Otherwise we unload all source files to save memory. if (self.totalErrorCount() == 0) { - self.root_scope.unload(self.allocator); + if (!self.keep_source_files_loaded) { + self.root_scope.unload(self.allocator); + } + try self.bin_file.flush(); } - - try self.bin_file.flush(); - self.link_error_flags = self.bin_file.error_flags; } /// Having the file open for writing is problematic as far as executing the @@ -619,10 +908,10 @@ pub fn makeBinFileWritable(self: *Module) !void { } pub fn totalErrorCount(self: *Module) usize { - return self.failed_decls.size + + const total = self.failed_decls.size + self.failed_files.size + - self.failed_exports.size + - @boolToInt(self.link_error_flags.no_entry_point_found); + self.failed_exports.size; + return if (total == 0) @boolToInt(self.link_error_flags.no_entry_point_found) else total; } pub fn getAllErrorsAlloc(self: *Module) !AllErrors { @@ -637,8 +926,8 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors { while (it.next()) |kv| { const scope = kv.key; const err_msg = kv.value; - const source = try self.getSource(scope); - try AllErrors.add(&arena, &errors, scope.sub_file_path, source, err_msg.*); + const source = try scope.getSource(self); + try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*); } } { @@ -646,8 +935,8 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors { while (it.next()) |kv| { const decl = kv.key; const err_msg = kv.value; - const source = try self.getSource(decl.scope); - try AllErrors.add(&arena, &errors, decl.scope.sub_file_path, source, err_msg.*); + const source = try decl.scope.getSource(self); + try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*); } } { @@ -655,12 +944,12 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors { while (it.next()) |kv| { const decl = kv.key.owner_decl; const err_msg = kv.value; - const source = try self.getSource(decl.scope); - try 
AllErrors.add(&arena, &errors, decl.scope.sub_file_path, source, err_msg.*); + const source = try decl.scope.getSource(self); + try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*); } } - if (self.link_error_flags.no_entry_point_found) { + if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) { try errors.append(.{ .src_path = self.root_pkg.root_src_path, .line = 0, @@ -683,12 +972,14 @@ const InnerError = error{ OutOfMemory, AnalysisFail }; pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void { while (self.work_queue.readItem()) |work_item| switch (work_item) { .codegen_decl => |decl| switch (decl.analysis) { + .unreferenced => unreachable, .in_progress => unreachable, .outdated => unreachable, .sema_failure, .codegen_failure, .dependency_failure, + .sema_failure_retryable, => continue, .complete, .codegen_failure_retryable => { @@ -696,12 +987,10 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void { switch (payload.func.analysis) { .queued => self.analyzeFnBody(decl, payload.func) catch |err| switch (err) { error.AnalysisFail => { - if (payload.func.analysis == .queued) { - payload.func.analysis = .dependency_failure; - } + assert(payload.func.analysis != .in_progress); continue; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, }, .in_progress => unreachable, .sema_failure, .dependency_failure => continue, @@ -720,7 +1009,7 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void { try self.failed_decls.ensureCapacity(self.failed_decls.size + 1); self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( self.allocator, - decl.src, + decl.src(), "unable to codegen: {}", .{@errorName(err)}, )); @@ -729,41 +1018,560 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void { }; }, }, - .re_analyze_decl => |decl| switch (decl.analysis) { - .in_progress => unreachable, + .analyze_decl => |decl| { + self.ensureDeclAnalyzed(decl) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => continue, + }; + }, + }; +} - .sema_failure, - .codegen_failure, - .dependency_failure, - .complete, - .codegen_failure_retryable, - => continue, +fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void { + const tracy = trace(@src()); + defer tracy.end(); - .outdated => { - const zir_module = self.getSrcModule(decl.scope) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - else => { - try self.failed_decls.ensureCapacity(self.failed_decls.size + 1); - self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( - self.allocator, - decl.src, - "unable to load source file '{}': {}", - .{ decl.scope.sub_file_path, @errorName(err) }, - )); - decl.analysis = .codegen_failure_retryable; - continue; + const subsequent_analysis = switch (decl.analysis) { + .in_progress => unreachable, + + .sema_failure, + .sema_failure_retryable, + .codegen_failure, + .dependency_failure, + .codegen_failure_retryable, + => return error.AnalysisFail, + + .complete, .outdated => blk: { + if (decl.generation == self.generation) { + assert(decl.analysis == .complete); + return; + } + //std.debug.warn("re-analyzing {}\n", .{decl.name}); + + // The exports this Decl performs will be re-discovered, so we remove them here + // prior to re-analysis. + self.deleteDeclExports(decl); + // Dependencies will be re-discovered, so we remove them here prior to re-analysis. 
+ for (decl.dependencies.items) |dep| { + dep.removeDependant(decl); + if (dep.dependants.items.len == 0 and !dep.deletion_flag) { + // We don't perform a deletion here, because this Decl or another one + // may end up referencing it before the update is complete. + dep.deletion_flag = true; + try self.deletion_set.append(self.allocator, dep); + } + } + decl.dependencies.shrink(self.allocator, 0); + + break :blk true; + }, + + .unreferenced => false, + }; + + const type_changed = if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| + try self.analyzeZirDecl(decl, zir_module.contents.module.decls[decl.src_index]) + else + self.astGenAndAnalyzeDecl(decl) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => return error.AnalysisFail, + else => { + try self.failed_decls.ensureCapacity(self.failed_decls.size + 1); + self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( + self.allocator, + decl.src(), + "unable to analyze: {}", + .{@errorName(err)}, + )); + decl.analysis = .sema_failure_retryable; + return error.AnalysisFail; + }, + }; + + if (subsequent_analysis) { + // We may need to chase the dependants and re-analyze them. + // However, if the decl is a function, and the type is the same, we do not need to. + if (type_changed or decl.typed_value.most_recent.typed_value.val.tag() != .function) { + for (decl.dependants.items) |dep| { + switch (dep.analysis) { + .unreferenced => unreachable, + .in_progress => unreachable, + .outdated => continue, // already queued for update + + .dependency_failure, + .sema_failure, + .sema_failure_retryable, + .codegen_failure, + .codegen_failure_retryable, + .complete, + => if (dep.generation != self.generation) { + try self.markOutdatedDecl(dep); }, + } + } + } + } +} + +fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { + const tracy = trace(@src()); + defer tracy.end(); + + const file_scope = decl.scope.cast(Scope.File).?; + const tree = try self.getAstTree(file_scope); + const ast_node = tree.root_node.decls()[decl.src_index]; + switch (ast_node.id) { + .FnProto => { + const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", ast_node); + + decl.analysis = .in_progress; + + // This arena allocator's memory is discarded at the end of this function. It is used + // to determine the type of the function, and hence the type of the decl, which is needed + // to complete the Decl analysis. 
+ var fn_type_scope: Scope.GenZIR = .{ + .decl = decl, + .arena = std.heap.ArenaAllocator.init(self.allocator), + .instructions = std.ArrayList(*zir.Inst).init(self.allocator), + }; + defer fn_type_scope.arena.deinit(); + defer fn_type_scope.instructions.deinit(); + + const body_node = fn_proto.body_node orelse + return self.failTok(&fn_type_scope.base, fn_proto.fn_token, "TODO implement extern functions", .{}); + if (fn_proto.params_len != 0) { + return self.failTok( + &fn_type_scope.base, + fn_proto.params()[0].name_token.?, + "TODO implement function parameters", + .{}, + ); + } + if (fn_proto.lib_name) |lib_name| { + return self.failNode(&fn_type_scope.base, lib_name, "TODO implement function library name", .{}); + } + if (fn_proto.align_expr) |align_expr| { + return self.failNode(&fn_type_scope.base, align_expr, "TODO implement function align expression", .{}); + } + if (fn_proto.section_expr) |sect_expr| { + return self.failNode(&fn_type_scope.base, sect_expr, "TODO implement function section expression", .{}); + } + if (fn_proto.callconv_expr) |callconv_expr| { + return self.failNode( + &fn_type_scope.base, + callconv_expr, + "TODO implement function calling convention expression", + .{}, + ); + } + const return_type_expr = switch (fn_proto.return_type) { + .Explicit => |node| node, + .InferErrorSet => |node| return self.failNode(&fn_type_scope.base, node, "TODO implement inferred error sets", .{}), + .Invalid => |tok| return self.failTok(&fn_type_scope.base, tok, "unable to parse return type", .{}), + }; + + const return_type_inst = try self.astGenExpr(&fn_type_scope.base, return_type_expr); + const fn_src = tree.token_locs[fn_proto.fn_token].start; + const fn_type_inst = try self.addZIRInst(&fn_type_scope.base, fn_src, zir.Inst.FnType, .{ + .return_type = return_type_inst, + .param_types = &[0]*zir.Inst{}, + }, .{}); + _ = try self.addZIRInst(&fn_type_scope.base, fn_src, zir.Inst.Return, .{ .operand = fn_type_inst }, .{}); + + // We need the memory for the Type to go into the arena for the Decl + var decl_arena = std.heap.ArenaAllocator.init(self.allocator); + errdefer decl_arena.deinit(); + const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); + + var block_scope: Scope.Block = .{ + .func = null, + .decl = decl, + .instructions = .{}, + .arena = &decl_arena.allocator, + }; + defer block_scope.instructions.deinit(self.allocator); + + const fn_type = try self.analyzeBodyValueAsType(&block_scope, .{ + .instructions = fn_type_scope.instructions.items, + }); + const new_func = try decl_arena.allocator.create(Fn); + const fn_payload = try decl_arena.allocator.create(Value.Payload.Function); + + const fn_zir = blk: { + // This scope's arena memory is discarded after the ZIR generation + // pass completes, and semantic analysis of it completes. + var gen_scope: Scope.GenZIR = .{ + .decl = decl, + .arena = std.heap.ArenaAllocator.init(self.allocator), + .instructions = std.ArrayList(*zir.Inst).init(self.allocator), }; - const decl_name = mem.spanZ(decl.name); - // We already detected deletions, so we know this will be found. 
- const src_decl = zir_module.findDecl(decl_name).?; - self.reAnalyzeDecl(decl, src_decl) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => continue, + errdefer gen_scope.arena.deinit(); + defer gen_scope.instructions.deinit(); + + const body_block = body_node.cast(ast.Node.Block).?; + + try self.astGenBlock(&gen_scope.base, body_block); + + const fn_zir = try gen_scope.arena.allocator.create(Fn.ZIR); + fn_zir.* = .{ + .body = .{ + .instructions = try gen_scope.arena.allocator.dupe(*zir.Inst, gen_scope.instructions.items), + }, + .arena = gen_scope.arena.state, }; - }, + break :blk fn_zir; + }; + + new_func.* = .{ + .analysis = .{ .queued = fn_zir }, + .owner_decl = decl, + }; + fn_payload.* = .{ .func = new_func }; + + var prev_type_has_bits = false; + var type_changed = true; + + if (decl.typedValueManaged()) |tvm| { + prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits(); + type_changed = !tvm.typed_value.ty.eql(fn_type); + + tvm.deinit(self.allocator); + } + + decl_arena_state.* = decl_arena.state; + decl.typed_value = .{ + .most_recent = .{ + .typed_value = .{ + .ty = fn_type, + .val = Value.initPayload(&fn_payload.base), + }, + .arena = decl_arena_state, + }, + }; + decl.analysis = .complete; + decl.generation = self.generation; + + if (fn_type.hasCodeGenBits()) { + // We don't fully codegen the decl until later, but we do need to reserve a global + // offset table index for it. This allows us to codegen decls out of dependency order, + // increasing how many computations can be done in parallel. + try self.bin_file.allocateDeclIndexes(decl); + try self.work_queue.writeItem(.{ .codegen_decl = decl }); + } else if (prev_type_has_bits) { + self.bin_file.freeDecl(decl); + } + + if (fn_proto.extern_export_inline_token) |maybe_export_token| { + if (tree.token_ids[maybe_export_token] == .Keyword_export) { + const export_src = tree.token_locs[maybe_export_token].start; + const name_loc = tree.token_locs[fn_proto.name_token.?]; + const name = tree.tokenSliceLoc(name_loc); + // The scope needs to have the decl in it. 
+ try self.analyzeExport(&block_scope.base, export_src, name, decl); + } + } + return type_changed; }, + .VarDecl => @panic("TODO var decl"), + .Comptime => @panic("TODO comptime decl"), + .Use => @panic("TODO usingnamespace decl"), + else => unreachable, + } +} + +fn analyzeBodyValueAsType(self: *Module, block_scope: *Scope.Block, body: zir.Module.Body) !Type { + try self.analyzeBody(&block_scope.base, body); + for (block_scope.instructions.items) |inst| { + if (inst.cast(Inst.Ret)) |ret| { + const val = try self.resolveConstValue(&block_scope.base, ret.args.operand); + return val.toType(); + } else { + return self.fail(&block_scope.base, inst.src, "unable to resolve comptime value", .{}); + } + } + unreachable; +} + +fn astGenExpr(self: *Module, scope: *Scope, ast_node: *ast.Node) InnerError!*zir.Inst { + switch (ast_node.id) { + .Identifier => return self.astGenIdent(scope, @fieldParentPtr(ast.Node.Identifier, "base", ast_node)), + .Asm => return self.astGenAsm(scope, @fieldParentPtr(ast.Node.Asm, "base", ast_node)), + .StringLiteral => return self.astGenStringLiteral(scope, @fieldParentPtr(ast.Node.StringLiteral, "base", ast_node)), + .IntegerLiteral => return self.astGenIntegerLiteral(scope, @fieldParentPtr(ast.Node.IntegerLiteral, "base", ast_node)), + .BuiltinCall => return self.astGenBuiltinCall(scope, @fieldParentPtr(ast.Node.BuiltinCall, "base", ast_node)), + .Call => return self.astGenCall(scope, @fieldParentPtr(ast.Node.Call, "base", ast_node)), + .Unreachable => return self.astGenUnreachable(scope, @fieldParentPtr(ast.Node.Unreachable, "base", ast_node)), + .ControlFlowExpression => return self.astGenControlFlowExpression(scope, @fieldParentPtr(ast.Node.ControlFlowExpression, "base", ast_node)), + else => return self.failNode(scope, ast_node, "TODO implement astGenExpr for {}", .{@tagName(ast_node.id)}), + } +} + +fn astGenControlFlowExpression( + self: *Module, + scope: *Scope, + cfe: *ast.Node.ControlFlowExpression, +) InnerError!*zir.Inst { + switch (cfe.kind) { + .Break => return self.failNode(scope, &cfe.base, "TODO implement astGenExpr for Break", .{}), + .Continue => return self.failNode(scope, &cfe.base, "TODO implement astGenExpr for Continue", .{}), + .Return => {}, + } + const tree = scope.tree(); + const src = tree.token_locs[cfe.ltoken].start; + if (cfe.rhs) |rhs_node| { + const operand = try self.astGenExpr(scope, rhs_node); + return self.addZIRInst(scope, src, zir.Inst.Return, .{ .operand = operand }, .{}); + } else { + return self.addZIRInst(scope, src, zir.Inst.ReturnVoid, .{}, .{}); + } +} + +fn astGenIdent(self: *Module, scope: *Scope, ident: *ast.Node.Identifier) InnerError!*zir.Inst { + const tree = scope.tree(); + const ident_name = tree.tokenSlice(ident.token); + if (mem.eql(u8, ident_name, "_")) { + return self.failNode(scope, &ident.base, "TODO implement '_' identifier", .{}); + } + + if (getSimplePrimitiveValue(ident_name)) |typed_value| { + const src = tree.token_locs[ident.token].start; + return self.addZIRInstConst(scope, src, typed_value); + } + + if (ident_name.len >= 2) integer: { + const first_c = ident_name[0]; + if (first_c == 'i' or first_c == 'u') { + const is_signed = first_c == 'i'; + const bit_count = std.fmt.parseInt(u16, ident_name[1..], 10) catch |err| switch (err) { + error.Overflow => return self.failNode( + scope, + &ident.base, + "primitive integer type '{}' exceeds maximum bit width of 65535", + .{ident_name}, + ), + error.InvalidCharacter => break :integer, + }; + return self.failNode(scope, &ident.base, "TODO implement 
arbitrary integer bitwidth types", .{}); + } + } + + if (self.lookupDeclName(scope, ident_name)) |decl| { + const src = tree.token_locs[ident.token].start; + return try self.addZIRInst(scope, src, zir.Inst.DeclValInModule, .{ .decl = decl }, .{}); + } + + return self.failNode(scope, &ident.base, "TODO implement local variable identifier lookup", .{}); +} + +fn astGenStringLiteral(self: *Module, scope: *Scope, str_lit: *ast.Node.StringLiteral) InnerError!*zir.Inst { + const tree = scope.tree(); + const unparsed_bytes = tree.tokenSlice(str_lit.token); + const arena = scope.arena(); + + var bad_index: usize = undefined; + const bytes = std.zig.parseStringLiteral(arena, unparsed_bytes, &bad_index) catch |err| switch (err) { + error.InvalidCharacter => { + const bad_byte = unparsed_bytes[bad_index]; + const src = tree.token_locs[str_lit.token].start; + return self.fail(scope, src + bad_index, "invalid string literal character: '{c}'\n", .{bad_byte}); + }, + else => |e| return e, }; + + const src = tree.token_locs[str_lit.token].start; + return self.addZIRInst(scope, src, zir.Inst.Str, .{ .bytes = bytes }, .{}); +} + +fn astGenIntegerLiteral(self: *Module, scope: *Scope, int_lit: *ast.Node.IntegerLiteral) InnerError!*zir.Inst { + const arena = scope.arena(); + const tree = scope.tree(); + const prefixed_bytes = tree.tokenSlice(int_lit.token); + const base = if (mem.startsWith(u8, prefixed_bytes, "0x")) + 16 + else if (mem.startsWith(u8, prefixed_bytes, "0o")) + 8 + else if (mem.startsWith(u8, prefixed_bytes, "0b")) + 2 + else + @as(u8, 10); + + const bytes = if (base == 10) + prefixed_bytes + else + prefixed_bytes[2..]; + + if (std.fmt.parseInt(u64, bytes, base)) |small_int| { + const int_payload = try arena.create(Value.Payload.Int_u64); + int_payload.* = .{ .int = small_int }; + const src = tree.token_locs[int_lit.token].start; + return self.addZIRInstConst(scope, src, .{ + .ty = Type.initTag(.comptime_int), + .val = Value.initPayload(&int_payload.base), + }); + } else |err| { + return self.failTok(scope, int_lit.token, "TODO implement int literals that don't fit in a u64", .{}); + } +} + +fn astGenBlock(self: *Module, scope: *Scope, block_node: *ast.Node.Block) !void { + const tracy = trace(@src()); + defer tracy.end(); + + if (block_node.label) |label| { + return self.failTok(scope, label, "TODO implement labeled blocks", .{}); + } + for (block_node.statements()) |statement| { + _ = try self.astGenExpr(scope, statement); + } +} + +fn astGenAsm(self: *Module, scope: *Scope, asm_node: *ast.Node.Asm) InnerError!*zir.Inst { + if (asm_node.outputs.len != 0) { + return self.failNode(scope, &asm_node.base, "TODO implement asm with an output", .{}); + } + const arena = scope.arena(); + const tree = scope.tree(); + + const inputs = try arena.alloc(*zir.Inst, asm_node.inputs.len); + const args = try arena.alloc(*zir.Inst, asm_node.inputs.len); + + for (asm_node.inputs) |input, i| { + // TODO semantically analyze constraints + inputs[i] = try self.astGenExpr(scope, input.constraint); + args[i] = try self.astGenExpr(scope, input.expr); + } + + const src = tree.token_locs[asm_node.asm_token].start; + const return_type = try self.addZIRInstConst(scope, src, .{ + .ty = Type.initTag(.type), + .val = Value.initTag(.void_type), + }); + const asm_inst = try self.addZIRInst(scope, src, zir.Inst.Asm, .{ + .asm_source = try self.astGenExpr(scope, asm_node.template), + .return_type = return_type, + }, .{ + .@"volatile" = asm_node.volatile_token != null, + //.clobbers = TODO handle clobbers + .inputs = inputs, + 
.args = args, + }); + return asm_inst; +} + +fn astGenBuiltinCall(self: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst { + const tree = scope.tree(); + const builtin_name = tree.tokenSlice(call.builtin_token); + const src = tree.token_locs[call.builtin_token].start; + + inline for (std.meta.declarations(zir.Inst)) |inst| { + if (inst.data != .Type) continue; + const T = inst.data.Type; + if (!@hasDecl(T, "builtin_name")) continue; + if (std.mem.eql(u8, builtin_name, T.builtin_name)) { + var value: T = undefined; + const positionals = @typeInfo(std.meta.fieldInfo(T, "positionals").field_type).Struct; + if (positionals.fields.len == 0) { + return self.addZIRInst(scope, src, T, value.positionals, value.kw_args); + } + const arg_count: ?usize = if (positionals.fields[0].field_type == []*zir.Inst) null else positionals.fields.len; + if (arg_count) |some| { + if (call.params_len != some) { + return self.failTok(scope, call.builtin_token, "expected {} parameter, found {}", .{ some, call.params_len }); + } + const params = call.params(); + inline for (positionals.fields) |p, i| { + @field(value.positionals, p.name) = try self.astGenExpr(scope, params[i]); + } + } else { + return self.failTok(scope, call.builtin_token, "TODO var args builtin '{}'", .{builtin_name}); + } + + return self.addZIRInst(scope, src, T, value.positionals, .{}); + } + } + return self.failTok(scope, call.builtin_token, "TODO implement builtin call for '{}'", .{builtin_name}); +} + +fn astGenCall(self: *Module, scope: *Scope, call: *ast.Node.Call) InnerError!*zir.Inst { + const tree = scope.tree(); + + if (call.params_len != 0) { + return self.failNode(scope, &call.base, "TODO implement fn calls with parameters", .{}); + } + const lhs = try self.astGenExpr(scope, call.lhs); + + const src = tree.token_locs[call.lhs.firstToken()].start; + return self.addZIRInst(scope, src, zir.Inst.Call, .{ + .func = lhs, + .args = &[0]*zir.Inst{}, + }, .{}); +} + +fn astGenUnreachable(self: *Module, scope: *Scope, unreach_node: *ast.Node.Unreachable) InnerError!*zir.Inst { + const tree = scope.tree(); + const src = tree.token_locs[unreach_node.token].start; + return self.addZIRInst(scope, src, zir.Inst.Unreachable, .{}, .{}); +} + +fn getSimplePrimitiveValue(name: []const u8) ?TypedValue { + const simple_types = std.ComptimeStringMap(Value.Tag, .{ + .{ "u8", .u8_type }, + .{ "i8", .i8_type }, + .{ "isize", .isize_type }, + .{ "usize", .usize_type }, + .{ "c_short", .c_short_type }, + .{ "c_ushort", .c_ushort_type }, + .{ "c_int", .c_int_type }, + .{ "c_uint", .c_uint_type }, + .{ "c_long", .c_long_type }, + .{ "c_ulong", .c_ulong_type }, + .{ "c_longlong", .c_longlong_type }, + .{ "c_ulonglong", .c_ulonglong_type }, + .{ "c_longdouble", .c_longdouble_type }, + .{ "f16", .f16_type }, + .{ "f32", .f32_type }, + .{ "f64", .f64_type }, + .{ "f128", .f128_type }, + .{ "c_void", .c_void_type }, + .{ "bool", .bool_type }, + .{ "void", .void_type }, + .{ "type", .type_type }, + .{ "anyerror", .anyerror_type }, + .{ "comptime_int", .comptime_int_type }, + .{ "comptime_float", .comptime_float_type }, + .{ "noreturn", .noreturn_type }, + }); + if (simple_types.get(name)) |tag| { + return TypedValue{ + .ty = Type.initTag(.type), + .val = Value.initTag(tag), + }; + } + if (mem.eql(u8, name, "null")) { + return TypedValue{ + .ty = Type.initTag(.@"null"), + .val = Value.initTag(.null_value), + }; + } + if (mem.eql(u8, name, "undefined")) { + return TypedValue{ + .ty = Type.initTag(.@"undefined"), + .val = Value.initTag(.undef), + }; 
+ } + if (mem.eql(u8, name, "true")) { + return TypedValue{ + .ty = Type.initTag(.bool), + .val = Value.initTag(.bool_true), + }; + } + if (mem.eql(u8, name, "false")) { + return TypedValue{ + .ty = Type.initTag(.bool), + .val = Value.initTag(.bool_false), + }; + } + return null; } fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void { @@ -783,29 +1591,12 @@ fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void } } -fn getSource(self: *Module, root_scope: *Scope.ZIRModule) ![:0]const u8 { - switch (root_scope.source) { - .unloaded => { - const source = try self.root_pkg.root_src_dir.readFileAllocOptions( - self.allocator, - root_scope.sub_file_path, - std.math.maxInt(u32), - 1, - 0, - ); - root_scope.source = .{ .bytes = source }; - return source; - }, - .bytes => |bytes| return bytes, - } -} - fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module { switch (root_scope.status) { .never_loaded, .unloaded_success => { try self.failed_files.ensureCapacity(self.failed_files.size + 1); - const source = try self.getSource(root_scope); + const source = try root_scope.getSource(self); var keep_zir_module = false; const zir_module = try self.allocator.create(zir.Module); @@ -816,7 +1607,7 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module { if (zir_module.error_msg) |src_err_msg| { self.failed_files.putAssumeCapacityNoClobber( - root_scope, + &root_scope.base, try ErrorMsg.create(self.allocator, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}), ); root_scope.status = .unloaded_parse_failure; @@ -838,90 +1629,189 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module { } } -fn analyzeRoot(self: *Module, root_scope: *Scope.ZIRModule) !void { +fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree { + const tracy = trace(@src()); + defer tracy.end(); + switch (root_scope.status) { - .never_loaded => { - const src_module = try self.getSrcModule(root_scope); + .never_loaded, .unloaded_success => { + try self.failed_files.ensureCapacity(self.failed_files.size + 1); - // Here we ensure enough queue capacity to store all the decls, so that later we can use - // appendAssumeCapacity. 
- try self.work_queue.ensureUnusedCapacity(src_module.decls.len); + const source = try root_scope.getSource(self); - for (src_module.decls) |decl| { - if (decl.cast(zir.Inst.Export)) |export_inst| { - _ = try self.resolveDecl(&root_scope.base, &export_inst.base); - } + var keep_tree = false; + const tree = try std.zig.parse(self.allocator, source); + defer if (!keep_tree) tree.deinit(); + + if (tree.errors.len != 0) { + const parse_err = tree.errors[0]; + + var msg = std.ArrayList(u8).init(self.allocator); + defer msg.deinit(); + + try parse_err.render(tree.token_ids, msg.outStream()); + const err_msg = try self.allocator.create(ErrorMsg); + err_msg.* = .{ + .msg = msg.toOwnedSlice(), + .byte_offset = tree.token_locs[parse_err.loc()].start, + }; + + self.failed_files.putAssumeCapacityNoClobber(&root_scope.base, err_msg); + root_scope.status = .unloaded_parse_failure; + return error.AnalysisFail; } + + root_scope.status = .loaded_success; + root_scope.contents = .{ .tree = tree }; + keep_tree = true; + + return tree; }, - .unloaded_parse_failure, - .unloaded_sema_failure, - .unloaded_success, - .loaded_sema_failure, - .loaded_success, - => { - const src_module = try self.getSrcModule(root_scope); - - var exports_to_resolve = std.ArrayList(*zir.Inst).init(self.allocator); - defer exports_to_resolve.deinit(); - - // Keep track of the decls that we expect to see in this file so that - // we know which ones have been deleted. - var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator); - defer deleted_decls.deinit(); - try deleted_decls.ensureCapacity(self.decl_table.size); - { - var it = self.decl_table.iterator(); - while (it.next()) |kv| { - deleted_decls.putAssumeCapacityNoClobber(kv.value, {}); + .unloaded_parse_failure => return error.AnalysisFail, + + .loaded_success => return root_scope.contents.tree, + } +} + +fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void { + // We may be analyzing it for the first time, or this may be + // an incremental update. This code handles both cases. + const tree = try self.getAstTree(root_scope); + const decls = tree.root_node.decls(); + + try self.work_queue.ensureUnusedCapacity(decls.len); + try root_scope.decls.ensureCapacity(self.allocator, decls.len); + + // Keep track of the decls that we expect to see in this file so that + // we know which ones have been deleted. + var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator); + defer deleted_decls.deinit(); + try deleted_decls.ensureCapacity(root_scope.decls.items.len); + for (root_scope.decls.items) |file_decl| { + deleted_decls.putAssumeCapacityNoClobber(file_decl, {}); + } + + for (decls) |src_decl, decl_i| { + if (src_decl.cast(ast.Node.FnProto)) |fn_proto| { + // We will create a Decl for it regardless of analysis status. + const name_tok = fn_proto.name_token orelse + @panic("TODO handle missing function name in the parser"); + const name_loc = tree.token_locs[name_tok]; + const name = tree.tokenSliceLoc(name_loc); + const name_hash = root_scope.fullyQualifiedNameHash(name); + const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl)); + if (self.decl_table.get(name_hash)) |kv| { + const decl = kv.value; + // Update the AST Node index of the decl, even if its contents are unchanged, it may + // have been re-ordered. 
+ decl.src_index = decl_i; + deleted_decls.removeAssertDiscard(decl); + if (!srcHashEql(decl.contents_hash, contents_hash)) { + try self.markOutdatedDecl(decl); + decl.contents_hash = contents_hash; } - } - - for (src_module.decls) |src_decl| { - const name_hash = Decl.hashSimpleName(src_decl.name); - if (self.decl_table.get(name_hash)) |kv| { - const decl = kv.value; - deleted_decls.removeAssertDiscard(decl); - const new_contents_hash = Decl.hashSimpleName(src_decl.contents); - //std.debug.warn("'{}' contents: '{}'\n", .{ src_decl.name, src_decl.contents }); - if (!mem.eql(u8, &new_contents_hash, &decl.contents_hash)) { - //std.debug.warn("'{}' {x} => {x}\n", .{ src_decl.name, decl.contents_hash, new_contents_hash }); - try self.markOutdatedDecl(decl); - decl.contents_hash = new_contents_hash; + } else { + const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash); + root_scope.decls.appendAssumeCapacity(new_decl); + if (fn_proto.extern_export_inline_token) |maybe_export_token| { + if (tree.token_ids[maybe_export_token] == .Keyword_export) { + self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); } - } else if (src_decl.cast(zir.Inst.Export)) |export_inst| { - try exports_to_resolve.append(&export_inst.base); } } - { - // Handle explicitly deleted decls from the source code. Not to be confused - // with when we delete decls because they are no longer referenced. - var it = deleted_decls.iterator(); - while (it.next()) |kv| { - //std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name}); - try self.deleteDecl(kv.key); - } + } + // TODO also look for global variable declarations + // TODO also look for comptime blocks and exported globals + } + { + // Handle explicitly deleted decls from the source code. Not to be confused + // with when we delete decls because they are no longer referenced. + var it = deleted_decls.iterator(); + while (it.next()) |kv| { + //std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name}); + try self.deleteDecl(kv.key); + } + } +} + +fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void { + // We may be analyzing it for the first time, or this may be + // an incremental update. This code handles both cases. + const src_module = try self.getSrcModule(root_scope); + + try self.work_queue.ensureUnusedCapacity(src_module.decls.len); + try root_scope.decls.ensureCapacity(self.allocator, src_module.decls.len); + + var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.allocator); + defer exports_to_resolve.deinit(); + + // Keep track of the decls that we expect to see in this file so that + // we know which ones have been deleted. 
+ var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator); + defer deleted_decls.deinit(); + try deleted_decls.ensureCapacity(self.decl_table.size); + { + var it = self.decl_table.iterator(); + while (it.next()) |kv| { + deleted_decls.putAssumeCapacityNoClobber(kv.value, {}); + } + } + + for (src_module.decls) |src_decl, decl_i| { + const name_hash = root_scope.fullyQualifiedNameHash(src_decl.name); + if (self.decl_table.get(name_hash)) |kv| { + const decl = kv.value; + deleted_decls.removeAssertDiscard(decl); + //std.debug.warn("'{}' contents: '{}'\n", .{ src_decl.name, src_decl.contents }); + if (!srcHashEql(src_decl.contents_hash, decl.contents_hash)) { + try self.markOutdatedDecl(decl); + decl.contents_hash = src_decl.contents_hash; } - for (exports_to_resolve.items) |export_inst| { - _ = try self.resolveDecl(&root_scope.base, export_inst); + } else { + const new_decl = try self.createNewDecl( + &root_scope.base, + src_decl.name, + decl_i, + name_hash, + src_decl.contents_hash, + ); + root_scope.decls.appendAssumeCapacity(new_decl); + if (src_decl.inst.cast(zir.Inst.Export)) |export_inst| { + try exports_to_resolve.append(src_decl); } - }, + } + } + for (exports_to_resolve.items) |export_decl| { + _ = try self.resolveZirDecl(&root_scope.base, export_decl); + } + { + // Handle explicitly deleted decls from the source code. Not to be confused + // with when we delete decls because they are no longer referenced. + var it = deleted_decls.iterator(); + while (it.next()) |kv| { + //std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name}); + try self.deleteDecl(kv.key); + } } } fn deleteDecl(self: *Module, decl: *Decl) !void { try self.deletion_set.ensureCapacity(self.allocator, self.deletion_set.items.len + decl.dependencies.items.len); + // Remove from the namespace it resides in. In the case of an anonymous Decl it will + // not be present in the set, and this does nothing. + decl.scope.removeDecl(decl); + //std.debug.warn("deleting decl '{}'\n", .{decl.name}); const name_hash = decl.fullyQualifiedNameHash(); self.decl_table.removeAssertDiscard(name_hash); // Remove itself from its dependencies, because we are about to destroy the decl pointer. for (decl.dependencies.items) |dep| { dep.removeDependant(decl); - if (dep.dependants.items.len == 0) { + if (dep.dependants.items.len == 0 and !dep.deletion_flag) { // We don't recursively perform a deletion here, because during the update, // another reference to it may turn up. - assert(!dep.deletion_flag); dep.deletion_flag = true; self.deletion_set.appendAssumeCapacity(dep); } @@ -974,83 +1864,89 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void { } fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void { + const tracy = trace(@src()); + defer tracy.end(); + // Use the Decl's arena for function memory. 
var arena = decl.typed_value.most_recent.arena.?.promote(self.allocator); defer decl.typed_value.most_recent.arena.?.* = arena.state; - var analysis: Fn.Analysis = .{ - .inner_block = .{ - .func = func, - .decl = decl, - .instructions = .{}, - .arena = &arena.allocator, - }, - .needed_inst_capacity = 0, - .inst_table = std.AutoHashMap(*zir.Inst, *Inst).init(self.allocator), + var inner_block: Scope.Block = .{ + .func = func, + .decl = decl, + .instructions = .{}, + .arena = &arena.allocator, }; - defer analysis.inner_block.instructions.deinit(self.allocator); - defer analysis.inst_table.deinit(); + defer inner_block.instructions.deinit(self.allocator); - const fn_inst = func.analysis.queued; - func.analysis = .{ .in_progress = &analysis }; + const fn_zir = func.analysis.queued; + defer fn_zir.arena.promote(self.allocator).deinit(); + func.analysis = .{ .in_progress = {} }; + //std.debug.warn("set {} to in_progress\n", .{decl.name}); - try self.analyzeBody(&analysis.inner_block.base, fn_inst.positionals.body); + try self.analyzeBody(&inner_block.base, fn_zir.body); - func.analysis = .{ - .success = .{ - .instructions = try arena.allocator.dupe(*Inst, analysis.inner_block.instructions.items), - }, - }; + const instructions = try arena.allocator.dupe(*Inst, inner_block.instructions.items); + func.analysis = .{ .success = .{ .instructions = instructions } }; + //std.debug.warn("set {} to success\n", .{decl.name}); } -fn reAnalyzeDecl(self: *Module, decl: *Decl, old_inst: *zir.Inst) InnerError!void { - switch (decl.analysis) { - .in_progress => unreachable, - .dependency_failure, - .sema_failure, - .codegen_failure, - .codegen_failure_retryable, - .complete, - => return, - - .outdated => {}, // Decl re-analysis +fn markOutdatedDecl(self: *Module, decl: *Decl) !void { + //std.debug.warn("mark {} outdated\n", .{decl.name}); + try self.work_queue.writeItem(.{ .analyze_decl = decl }); + if (self.failed_decls.remove(decl)) |entry| { + entry.value.destroy(self.allocator); } - //std.debug.warn("re-analyzing {}\n", .{decl.name}); - decl.src = old_inst.src; + decl.analysis = .outdated; +} - // The exports this Decl performs will be re-discovered, so we remove them here - // prior to re-analysis. - self.deleteDeclExports(decl); - // Dependencies will be re-discovered, so we remove them here prior to re-analysis. - for (decl.dependencies.items) |dep| { - dep.removeDependant(decl); - if (dep.dependants.items.len == 0) { - // We don't perform a deletion here, because this Decl or another one - // may end up referencing it before the update is complete. 
- assert(!dep.deletion_flag); - dep.deletion_flag = true; - try self.deletion_set.append(self.allocator, dep); - } - } - decl.dependencies.shrink(self.allocator, 0); +fn allocateNewDecl( + self: *Module, + scope: *Scope, + src_index: usize, + contents_hash: std.zig.SrcHash, +) !*Decl { + const new_decl = try self.allocator.create(Decl); + new_decl.* = .{ + .name = "", + .scope = scope.namespace(), + .src_index = src_index, + .typed_value = .{ .never_succeeded = {} }, + .analysis = .unreferenced, + .deletion_flag = false, + .contents_hash = contents_hash, + .link = link.ElfFile.TextBlock.empty, + .generation = 0, + }; + return new_decl; +} + +fn createNewDecl( + self: *Module, + scope: *Scope, + decl_name: []const u8, + src_index: usize, + name_hash: Scope.NameHash, + contents_hash: std.zig.SrcHash, +) !*Decl { + try self.decl_table.ensureCapacity(self.decl_table.size + 1); + const new_decl = try self.allocateNewDecl(scope, src_index, contents_hash); + errdefer self.allocator.destroy(new_decl); + new_decl.name = try mem.dupeZ(self.allocator, u8, decl_name); + self.decl_table.putAssumeCapacityNoClobber(name_hash, new_decl); + return new_decl; +} + +fn analyzeZirDecl(self: *Module, decl: *Decl, src_decl: *zir.Decl) InnerError!bool { var decl_scope: Scope.DeclAnalysis = .{ .decl = decl, .arena = std.heap.ArenaAllocator.init(self.allocator), }; errdefer decl_scope.arena.deinit(); - const typed_value = self.analyzeInstConst(&decl_scope.base, old_inst) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - switch (decl.analysis) { - .in_progress => decl.analysis = .dependency_failure, - else => {}, - } - decl.generation = self.generation; - return error.AnalysisFail; - }, - }; + decl.analysis = .in_progress; + + const typed_value = try self.analyzeConstInst(&decl_scope.base, src_decl.inst); const arena_state = try decl_scope.arena.allocator.create(std.heap.ArenaAllocator.State); - arena_state.* = decl_scope.arena.state; var prev_type_has_bits = false; var type_changed = true; @@ -1061,6 +1957,8 @@ fn reAnalyzeDecl(self: *Module, decl: *Decl, old_inst: *zir.Inst) InnerError!voi tvm.deinit(self.allocator); } + + arena_state.* = decl_scope.arena.state; decl.typed_value = .{ .most_recent = .{ .typed_value = typed_value, @@ -1079,137 +1977,66 @@ fn reAnalyzeDecl(self: *Module, decl: *Decl, old_inst: *zir.Inst) InnerError!voi self.bin_file.freeDecl(decl); } - // If the decl is a function, and the type is the same, we do not need - // to chase the dependants. 
- if (type_changed or typed_value.val.tag() != .function) { - for (decl.dependants.items) |dep| { - switch (dep.analysis) { - .in_progress => unreachable, - .outdated => continue, // already queued for update - - .dependency_failure, - .sema_failure, - .codegen_failure, - .codegen_failure_retryable, - .complete, - => if (dep.generation != self.generation) { - try self.markOutdatedDecl(dep); - }, - } - } - } + return type_changed; } -fn markOutdatedDecl(self: *Module, decl: *Decl) !void { - //std.debug.warn("mark {} outdated\n", .{decl.name}); - try self.work_queue.writeItem(.{ .re_analyze_decl = decl }); - if (self.failed_decls.remove(decl)) |entry| { - entry.value.destroy(self.allocator); - } - decl.analysis = .outdated; +fn resolveZirDecl(self: *Module, scope: *Scope, src_decl: *zir.Decl) InnerError!*Decl { + const zir_module = self.root_scope.cast(Scope.ZIRModule).?; + const entry = zir_module.contents.module.findDecl(src_decl.name).?; + return self.resolveZirDeclHavingIndex(scope, src_decl, entry.index); } -fn resolveDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Decl { - const hash = Decl.hashSimpleName(old_inst.name); - if (self.decl_table.get(hash)) |kv| { - const decl = kv.value; - try self.reAnalyzeDecl(decl, old_inst); - return decl; - } else if (old_inst.cast(zir.Inst.DeclVal)) |decl_val| { - // This is just a named reference to another decl. - return self.analyzeDeclVal(scope, decl_val); - } else { - const new_decl = blk: { - try self.decl_table.ensureCapacity(self.decl_table.size + 1); - const new_decl = try self.allocator.create(Decl); - errdefer self.allocator.destroy(new_decl); - const name = try mem.dupeZ(self.allocator, u8, old_inst.name); - errdefer self.allocator.free(name); - new_decl.* = .{ - .name = name, - .scope = scope.namespace(), - .src = old_inst.src, - .typed_value = .{ .never_succeeded = {} }, - .analysis = .in_progress, - .deletion_flag = false, - .contents_hash = Decl.hashSimpleName(old_inst.contents), - .link = link.ElfFile.TextBlock.empty, - .generation = 0, - }; - self.decl_table.putAssumeCapacityNoClobber(hash, new_decl); - break :blk new_decl; - }; - - var decl_scope: Scope.DeclAnalysis = .{ - .decl = new_decl, - .arena = std.heap.ArenaAllocator.init(self.allocator), - }; - errdefer decl_scope.arena.deinit(); - - const typed_value = self.analyzeInstConst(&decl_scope.base, old_inst) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - switch (new_decl.analysis) { - .in_progress => new_decl.analysis = .dependency_failure, - else => {}, - } - new_decl.generation = self.generation; - return error.AnalysisFail; - }, - }; - const arena_state = try decl_scope.arena.allocator.create(std.heap.ArenaAllocator.State); - - arena_state.* = decl_scope.arena.state; - - new_decl.typed_value = .{ - .most_recent = .{ - .typed_value = typed_value, - .arena = arena_state, - }, - }; - new_decl.analysis = .complete; - new_decl.generation = self.generation; - if (typed_value.ty.hasCodeGenBits()) { - // We don't fully codegen the decl until later, but we do need to reserve a global - // offset table index for it. This allows us to codegen decls out of dependency order, - // increasing how many computations can be done in parallel. 
- try self.bin_file.allocateDeclIndexes(new_decl); - try self.work_queue.writeItem(.{ .codegen_decl = new_decl }); - } - return new_decl; - } +fn resolveZirDeclHavingIndex(self: *Module, scope: *Scope, src_decl: *zir.Decl, src_index: usize) InnerError!*Decl { + const name_hash = scope.namespace().fullyQualifiedNameHash(src_decl.name); + const decl = self.decl_table.getValue(name_hash).?; + decl.src_index = src_index; + try self.ensureDeclAnalyzed(decl); + return decl; } /// Declares a dependency on the decl. -fn resolveCompleteDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Decl { - const decl = try self.resolveDecl(scope, old_inst); +fn resolveCompleteZirDecl(self: *Module, scope: *Scope, src_decl: *zir.Decl) InnerError!*Decl { + const decl = try self.resolveZirDecl(scope, src_decl); switch (decl.analysis) { + .unreferenced => unreachable, .in_progress => unreachable, .outdated => unreachable, .dependency_failure, .sema_failure, + .sema_failure_retryable, .codegen_failure, .codegen_failure_retryable, => return error.AnalysisFail, .complete => {}, } - if (scope.decl()) |scope_decl| { - try self.declareDeclDependency(scope_decl, decl); - } return decl; } +/// TODO Look into removing this function. The body is only needed for .zir files, not .zig files. fn resolveInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Inst { - if (scope.cast(Scope.Block)) |block| { - if (block.func.analysis.in_progress.inst_table.get(old_inst)) |kv| { - return kv.value; - } - } - - const decl = try self.resolveCompleteDecl(scope, old_inst); + if (old_inst.analyzed_inst) |inst| return inst; + + // If this assert trips, the instruction that was referenced did not get properly + // analyzed before it was referenced. + const zir_module = scope.namespace().cast(Scope.ZIRModule).?; + const entry = if (old_inst.cast(zir.Inst.DeclVal)) |declval| blk: { + const decl_name = declval.positionals.name; + const entry = zir_module.contents.module.findDecl(decl_name) orelse + return self.fail(scope, old_inst.src, "decl '{}' not found", .{decl_name}); + break :blk entry; + } else blk: { + // If this assert trips, the instruction that was referenced did not get + // properly analyzed by a previous instruction analysis before it was + // referenced by the current one. + break :blk zir_module.contents.module.findInstDecl(old_inst).?; + }; + const decl = try self.resolveCompleteZirDecl(scope, entry.decl); const decl_ref = try self.analyzeDeclRef(scope, old_inst.src, decl); + // Note: it would be tempting here to store the result into old_inst.analyzed_inst field, + // but this would prevent the analyzeDeclRef from happening, which is needed to properly + // detect Decl dependencies and dependency failures on updates. 
return self.analyzeDeref(scope, old_inst.src, decl_ref, old_inst.src); } @@ -1258,21 +2085,16 @@ fn resolveType(self: *Module, scope: *Scope, old_inst: *zir.Inst) !Type { return val.toType(); } -fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) InnerError!void { - try self.decl_exports.ensureCapacity(self.decl_exports.size + 1); - try self.export_owners.ensureCapacity(self.export_owners.size + 1); - const symbol_name = try self.resolveConstString(scope, export_inst.positionals.symbol_name); - const exported_decl = try self.resolveCompleteDecl(scope, export_inst.positionals.value); +fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const u8, exported_decl: *Decl) !void { + try self.ensureDeclAnalyzed(exported_decl); const typed_value = exported_decl.typed_value.most_recent.typed_value; switch (typed_value.ty.zigTypeTag()) { .Fn => {}, - else => return self.fail( - scope, - export_inst.positionals.value.src, - "unable to export type '{}'", - .{typed_value.ty}, - ), + else => return self.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}), } + try self.decl_exports.ensureCapacity(self.decl_exports.size + 1); + try self.export_owners.ensureCapacity(self.export_owners.size + 1); + const new_export = try self.allocator.create(Export); errdefer self.allocator.destroy(new_export); @@ -1280,7 +2102,7 @@ fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) In new_export.* = .{ .options = .{ .name = symbol_name }, - .src = export_inst.base.src, + .src = src, .link = .{}, .owner_decl = owner_decl, .exported_decl = exported_decl, @@ -1311,7 +2133,7 @@ fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) In try self.failed_exports.ensureCapacity(self.failed_exports.size + 1); self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create( self.allocator, - export_inst.base.src, + src, "unable to export: {}", .{@errorName(err)}, )); @@ -1320,7 +2142,6 @@ fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) In }; } -/// TODO should not need the cast on the last parameter at the callsites fn addNewInstArgs( self: *Module, block: *Scope.Block, @@ -1334,6 +2155,46 @@ fn addNewInstArgs( return &inst.base; } +fn newZIRInst( + allocator: *Allocator, + src: usize, + comptime T: type, + positionals: std.meta.fieldInfo(T, "positionals").field_type, + kw_args: std.meta.fieldInfo(T, "kw_args").field_type, +) !*zir.Inst { + const inst = try allocator.create(T); + inst.* = .{ + .base = .{ + .tag = T.base_tag, + .src = src, + }, + .positionals = positionals, + .kw_args = kw_args, + }; + return &inst.base; +} + +fn addZIRInst( + self: *Module, + scope: *Scope, + src: usize, + comptime T: type, + positionals: std.meta.fieldInfo(T, "positionals").field_type, + kw_args: std.meta.fieldInfo(T, "kw_args").field_type, +) !*zir.Inst { + const gen_zir = scope.cast(Scope.GenZIR).?; + try gen_zir.instructions.ensureCapacity(gen_zir.instructions.items.len + 1); + const inst = try newZIRInst(&gen_zir.arena.allocator, src, T, positionals, kw_args); + gen_zir.instructions.appendAssumeCapacity(inst); + return inst; +} + +/// TODO The existence of this function is a workaround for a bug in stage1. 
+fn addZIRInstConst(self: *Module, scope: *Scope, src: usize, typed_value: TypedValue) !*zir.Inst { + const P = std.meta.fieldInfo(zir.Inst.Const, "positionals").field_type; + return self.addZIRInst(scope, src, zir.Inst.Const, P{ .typed_value = typed_value }, .{}); +} + fn addNewInst(self: *Module, block: *Scope.Block, src: usize, ty: Type, comptime T: type) !*T { const inst = try block.arena.create(T); inst.* = .{ @@ -1361,19 +2222,6 @@ fn constInst(self: *Module, scope: *Scope, src: usize, typed_value: TypedValue) return &const_inst.base; } -fn constStr(self: *Module, scope: *Scope, src: usize, str: []const u8) !*Inst { - const ty_payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0); - ty_payload.* = .{ .len = str.len }; - - const bytes_payload = try scope.arena().create(Value.Payload.Bytes); - bytes_payload.* = .{ .data = str }; - - return self.constInst(scope, src, .{ - .ty = Type.initPayload(&ty_payload.base), - .val = Value.initPayload(&bytes_payload.base), - }); -} - fn constType(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst { return self.constInst(scope, src, .{ .ty = Type.initTag(.type), @@ -1388,6 +2236,13 @@ fn constVoid(self: *Module, scope: *Scope, src: usize) !*Inst { }); } +fn constNoReturn(self: *Module, scope: *Scope, src: usize) !*Inst { + return self.constInst(scope, src, .{ + .ty = Type.initTag(.noreturn), + .val = Value.initTag(.the_one_possible_value), + }); +} + fn constUndef(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst { return self.constInst(scope, src, .{ .ty = ty, @@ -1451,7 +2306,7 @@ fn constIntBig(self: *Module, scope: *Scope, src: usize, ty: Type, big_int: BigI }); } -fn analyzeInstConst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!TypedValue { +fn analyzeConstInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!TypedValue { const new_inst = try self.analyzeInst(scope, old_inst); return TypedValue{ .ty = new_inst.ty, @@ -1459,20 +2314,24 @@ fn analyzeInstConst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerErro }; } +fn analyzeInstConst(self: *Module, scope: *Scope, const_inst: *zir.Inst.Const) InnerError!*Inst { + // Move the TypedValue from old memory to new memory. This allows freeing the ZIR instructions + // after analysis. + const typed_value_copy = try const_inst.positionals.typed_value.copy(scope.arena()); + return self.constInst(scope, const_inst.base.src, typed_value_copy); +} + fn analyzeInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Inst { switch (old_inst.tag) { .breakpoint => return self.analyzeInstBreakpoint(scope, old_inst.cast(zir.Inst.Breakpoint).?), .call => return self.analyzeInstCall(scope, old_inst.cast(zir.Inst.Call).?), .compileerror => return self.analyzeInstCompileError(scope, old_inst.cast(zir.Inst.CompileError).?), + .@"const" => return self.analyzeInstConst(scope, old_inst.cast(zir.Inst.Const).?), .declref => return self.analyzeInstDeclRef(scope, old_inst.cast(zir.Inst.DeclRef).?), + .declref_str => return self.analyzeInstDeclRefStr(scope, old_inst.cast(zir.Inst.DeclRefStr).?), .declval => return self.analyzeInstDeclVal(scope, old_inst.cast(zir.Inst.DeclVal).?), - .str => { - const bytes = old_inst.cast(zir.Inst.Str).?.positionals.bytes; - // The bytes references memory inside the ZIR module, which can get deallocated - // after semantic analysis is complete. We need the memory to be in the Decl's arena. 
- const arena_bytes = try scope.arena().dupe(u8, bytes); - return self.constStr(scope, old_inst.src, arena_bytes); - }, + .declval_in_module => return self.analyzeInstDeclValInModule(scope, old_inst.cast(zir.Inst.DeclValInModule).?), + .str => return self.analyzeInstStr(scope, old_inst.cast(zir.Inst.Str).?), .int => { const big_int = old_inst.cast(zir.Inst.Int).?.positionals.int; return self.constIntBig(scope, old_inst.src, Type.initTag(.comptime_int), big_int); @@ -1484,13 +2343,10 @@ fn analyzeInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*In .@"asm" => return self.analyzeInstAsm(scope, old_inst.cast(zir.Inst.Asm).?), .@"unreachable" => return self.analyzeInstUnreachable(scope, old_inst.cast(zir.Inst.Unreachable).?), .@"return" => return self.analyzeInstRet(scope, old_inst.cast(zir.Inst.Return).?), + .returnvoid => return self.analyzeInstRetVoid(scope, old_inst.cast(zir.Inst.ReturnVoid).?), .@"fn" => return self.analyzeInstFn(scope, old_inst.cast(zir.Inst.Fn).?), - .@"export" => { - try self.analyzeExport(scope, old_inst.cast(zir.Inst.Export).?); - return self.constVoid(scope, old_inst.src); - }, + .@"export" => return self.analyzeInstExport(scope, old_inst.cast(zir.Inst.Export).?), .primitive => return self.analyzeInstPrimitive(scope, old_inst.cast(zir.Inst.Primitive).?), - .ref => return self.analyzeInstRef(scope, old_inst.cast(zir.Inst.Ref).?), .fntype => return self.analyzeInstFnType(scope, old_inst.cast(zir.Inst.FnType).?), .intcast => return self.analyzeInstIntCast(scope, old_inst.cast(zir.Inst.IntCast).?), .bitcast => return self.analyzeInstBitCast(scope, old_inst.cast(zir.Inst.BitCast).?), @@ -1503,39 +2359,104 @@ fn analyzeInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*In } } +fn analyzeInstStr(self: *Module, scope: *Scope, str_inst: *zir.Inst.Str) InnerError!*Inst { + // The bytes references memory inside the ZIR module, which can get deallocated + // after semantic analysis is complete. We need the memory to be in the new anonymous Decl's arena. 
+ var new_decl_arena = std.heap.ArenaAllocator.init(self.allocator); + const arena_bytes = try new_decl_arena.allocator.dupe(u8, str_inst.positionals.bytes); + + const ty_payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0); + ty_payload.* = .{ .len = arena_bytes.len }; + + const bytes_payload = try scope.arena().create(Value.Payload.Bytes); + bytes_payload.* = .{ .data = arena_bytes }; + + const new_decl = try self.createAnonymousDecl(scope, &new_decl_arena, .{ + .ty = Type.initPayload(&ty_payload.base), + .val = Value.initPayload(&bytes_payload.base), + }); + return self.analyzeDeclRef(scope, str_inst.base.src, new_decl); +} + +fn createAnonymousDecl( + self: *Module, + scope: *Scope, + decl_arena: *std.heap.ArenaAllocator, + typed_value: TypedValue, +) !*Decl { + const name_index = self.getNextAnonNameIndex(); + const scope_decl = scope.decl().?; + const name = try std.fmt.allocPrint(self.allocator, "{}${}", .{ scope_decl.name, name_index }); + defer self.allocator.free(name); + const name_hash = scope.namespace().fullyQualifiedNameHash(name); + const src_hash: std.zig.SrcHash = undefined; + const new_decl = try self.createNewDecl(scope, name, scope_decl.src_index, name_hash, src_hash); + const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); + + decl_arena_state.* = decl_arena.state; + new_decl.typed_value = .{ + .most_recent = .{ + .typed_value = typed_value, + .arena = decl_arena_state, + }, + }; + new_decl.analysis = .complete; + new_decl.generation = self.generation; + + // TODO: This generates the Decl into the machine code file if it is of a type that is non-zero size. + // We should be able to further improve the compiler to not omit Decls which are only referenced at + // compile-time and not runtime. 
+ if (typed_value.ty.hasCodeGenBits()) { + try self.bin_file.allocateDeclIndexes(new_decl); + try self.work_queue.writeItem(.{ .codegen_decl = new_decl }); + } + + return new_decl; +} + +fn getNextAnonNameIndex(self: *Module) usize { + return @atomicRmw(usize, &self.next_anon_name_index, .Add, 1, .Monotonic); +} + +fn lookupDeclName(self: *Module, scope: *Scope, ident_name: []const u8) ?*Decl { + const namespace = scope.namespace(); + const name_hash = namespace.fullyQualifiedNameHash(ident_name); + return self.decl_table.getValue(name_hash); +} + +fn analyzeInstExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) InnerError!*Inst { + const symbol_name = try self.resolveConstString(scope, export_inst.positionals.symbol_name); + const exported_decl = self.lookupDeclName(scope, export_inst.positionals.decl_name) orelse + return self.fail(scope, export_inst.base.src, "decl '{}' not found", .{export_inst.positionals.decl_name}); + try self.analyzeExport(scope, export_inst.base.src, symbol_name, exported_decl); + return self.constVoid(scope, export_inst.base.src); +} + fn analyzeInstCompileError(self: *Module, scope: *Scope, inst: *zir.Inst.CompileError) InnerError!*Inst { return self.fail(scope, inst.base.src, "{}", .{inst.positionals.msg}); } fn analyzeInstBreakpoint(self: *Module, scope: *Scope, inst: *zir.Inst.Breakpoint) InnerError!*Inst { const b = try self.requireRuntimeBlock(scope, inst.base.src); - return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Breakpoint, Inst.Args(Inst.Breakpoint){}); + return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Breakpoint, {}); } -fn analyzeInstRef(self: *Module, scope: *Scope, inst: *zir.Inst.Ref) InnerError!*Inst { - const decl = try self.resolveCompleteDecl(scope, inst.positionals.operand); - return self.analyzeDeclRef(scope, inst.base.src, decl); +fn analyzeInstDeclRefStr(self: *Module, scope: *Scope, inst: *zir.Inst.DeclRefStr) InnerError!*Inst { + const decl_name = try self.resolveConstString(scope, inst.positionals.name); + return self.analyzeDeclRefByName(scope, inst.base.src, decl_name); } fn analyzeInstDeclRef(self: *Module, scope: *Scope, inst: *zir.Inst.DeclRef) InnerError!*Inst { - const decl_name = try self.resolveConstString(scope, inst.positionals.name); - // This will need to get more fleshed out when there are proper structs & namespaces. - const zir_module = scope.namespace(); - const src_decl = zir_module.contents.module.findDecl(decl_name) orelse - return self.fail(scope, inst.positionals.name.src, "use of undeclared identifier '{}'", .{decl_name}); - - const decl = try self.resolveCompleteDecl(scope, src_decl); - return self.analyzeDeclRef(scope, inst.base.src, decl); + return self.analyzeDeclRefByName(scope, inst.base.src, inst.positionals.name); } fn analyzeDeclVal(self: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) InnerError!*Decl { const decl_name = inst.positionals.name; - // This will need to get more fleshed out when there are proper structs & namespaces. 
- const zir_module = scope.namespace(); + const zir_module = scope.namespace().cast(Scope.ZIRModule).?; const src_decl = zir_module.contents.module.findDecl(decl_name) orelse return self.fail(scope, inst.base.src, "use of undeclared identifier '{}'", .{decl_name}); - const decl = try self.resolveCompleteDecl(scope, src_decl); + const decl = try self.resolveCompleteZirDecl(scope, src_decl.decl); return decl; } @@ -1546,18 +2467,46 @@ fn analyzeInstDeclVal(self: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) Inn return self.analyzeDeref(scope, inst.base.src, ptr, inst.base.src); } +fn analyzeInstDeclValInModule(self: *Module, scope: *Scope, inst: *zir.Inst.DeclValInModule) InnerError!*Inst { + const decl = inst.positionals.decl; + const ptr = try self.analyzeDeclRef(scope, inst.base.src, decl); + return self.analyzeDeref(scope, inst.base.src, ptr, inst.base.src); +} + fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) InnerError!*Inst { + const scope_decl = scope.decl().?; + try self.declareDeclDependency(scope_decl, decl); + self.ensureDeclAnalyzed(decl) catch |err| { + if (scope.cast(Scope.Block)) |block| { + if (block.func) |func| { + func.analysis = .dependency_failure; + } else { + block.decl.analysis = .dependency_failure; + } + } else { + scope_decl.analysis = .dependency_failure; + } + return err; + }; + const decl_tv = try decl.typedValue(); const ty_payload = try scope.arena().create(Type.Payload.SingleConstPointer); ty_payload.* = .{ .pointee_type = decl_tv.ty }; const val_payload = try scope.arena().create(Value.Payload.DeclRef); val_payload.* = .{ .decl = decl }; + return self.constInst(scope, src, .{ .ty = Type.initPayload(&ty_payload.base), .val = Value.initPayload(&val_payload.base), }); } +fn analyzeDeclRefByName(self: *Module, scope: *Scope, src: usize, decl_name: []const u8) InnerError!*Inst { + const decl = self.lookupDeclName(scope, decl_name) orelse + return self.fail(scope, src, "decl '{}' not found", .{decl_name}); + return self.analyzeDeclRef(scope, src, decl); +} + fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst { const func = try self.resolveInst(scope, inst.positionals.func); if (func.ty.zigTypeTag() != .Fn) @@ -1616,7 +2565,7 @@ fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerErro } const b = try self.requireRuntimeBlock(scope, inst.base.src); - return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Call, Inst.Args(Inst.Call){ + return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Call, .{ .func = func, .args = casted_args, }); @@ -1624,10 +2573,22 @@ fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerErro fn analyzeInstFn(self: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError!*Inst { const fn_type = try self.resolveType(scope, fn_inst.positionals.fn_type); + const fn_zir = blk: { + var fn_arena = std.heap.ArenaAllocator.init(self.allocator); + errdefer fn_arena.deinit(); + + const fn_zir = try scope.arena().create(Fn.ZIR); + fn_zir.* = .{ + .body = .{ + .instructions = fn_inst.positionals.body.instructions, + }, + .arena = fn_arena.state, + }; + break :blk fn_zir; + }; const new_func = try scope.arena().create(Fn); new_func.* = .{ - .fn_type = fn_type, - .analysis = .{ .queued = fn_inst }, + .analysis = .{ .queued = fn_zir }, .owner_decl = scope.decl().?, }; const fn_payload = try scope.arena().create(Value.Payload.Function); @@ -1648,6 +2609,13 @@ fn analyzeInstFnType(self: *Module, scope: *Scope, 
fntype: *zir.Inst.FnType) Inn return self.constType(scope, fntype.base.src, Type.initTag(.fn_noreturn_no_args)); } + if (return_type.zigTypeTag() == .Void and + fntype.positionals.param_types.len == 0 and + fntype.kw_args.cc == .Unspecified) + { + return self.constType(scope, fntype.base.src, Type.initTag(.fn_void_no_args)); + } + if (return_type.zigTypeTag() == .NoReturn and fntype.positionals.param_types.len == 0 and fntype.kw_args.cc == .Naked) @@ -1683,7 +2651,7 @@ fn analyzeInstPtrToInt(self: *Module, scope: *Scope, ptrtoint: *zir.Inst.PtrToIn // TODO handle known-pointer-address const b = try self.requireRuntimeBlock(scope, ptrtoint.base.src); const ty = Type.initTag(.usize); - return self.addNewInstArgs(b, ptrtoint.base.src, ty, Inst.PtrToInt, Inst.Args(Inst.PtrToInt){ .ptr = ptr }); + return self.addNewInstArgs(b, ptrtoint.base.src, ty, Inst.PtrToInt, .{ .ptr = ptr }); } fn analyzeInstFieldPtr(self: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr) InnerError!*Inst { @@ -1875,7 +2843,7 @@ fn analyzeInstAsm(self: *Module, scope: *Scope, assembly: *zir.Inst.Asm) InnerEr } const b = try self.requireRuntimeBlock(scope, assembly.base.src); - return self.addNewInstArgs(b, assembly.base.src, return_type, Inst.Assembly, Inst.Args(Inst.Assembly){ + return self.addNewInstArgs(b, assembly.base.src, return_type, Inst.Assembly, .{ .asm_source = asm_source, .is_volatile = assembly.kw_args.@"volatile", .output = output, @@ -1911,20 +2879,12 @@ fn analyzeInstCmp(self: *Module, scope: *Scope, inst: *zir.Inst.Cmp) InnerError! } const b = try self.requireRuntimeBlock(scope, inst.base.src); switch (op) { - .eq => return self.addNewInstArgs( - b, - inst.base.src, - Type.initTag(.bool), - Inst.IsNull, - Inst.Args(Inst.IsNull){ .operand = opt_operand }, - ), - .neq => return self.addNewInstArgs( - b, - inst.base.src, - Type.initTag(.bool), - Inst.IsNonNull, - Inst.Args(Inst.IsNonNull){ .operand = opt_operand }, - ), + .eq => return self.addNewInstArgs(b, inst.base.src, Type.initTag(.bool), Inst.IsNull, .{ + .operand = opt_operand, + }), + .neq => return self.addNewInstArgs(b, inst.base.src, Type.initTag(.bool), Inst.IsNonNull, .{ + .operand = opt_operand, + }), else => unreachable, } } else if (is_equality_cmp and @@ -2019,23 +2979,19 @@ fn analyzeInstUnreachable(self: *Module, scope: *Scope, unreach: *zir.Inst.Unrea } fn analyzeInstRet(self: *Module, scope: *Scope, inst: *zir.Inst.Return) InnerError!*Inst { + const operand = try self.resolveInst(scope, inst.positionals.operand); const b = try self.requireRuntimeBlock(scope, inst.base.src); - return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.Ret, {}); + return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.Ret, .{ .operand = operand }); +} + +fn analyzeInstRetVoid(self: *Module, scope: *Scope, inst: *zir.Inst.ReturnVoid) InnerError!*Inst { + const b = try self.requireRuntimeBlock(scope, inst.base.src); + return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.RetVoid, {}); } fn analyzeBody(self: *Module, scope: *Scope, body: zir.Module.Body) !void { - if (scope.cast(Scope.Block)) |b| { - const analysis = b.func.analysis.in_progress; - analysis.needed_inst_capacity += body.instructions.len; - try analysis.inst_table.ensureCapacity(analysis.needed_inst_capacity); - for (body.instructions) |src_inst| { - const new_inst = try self.analyzeInst(scope, src_inst); - analysis.inst_table.putAssumeCapacityNoClobber(src_inst, new_inst); - } - } else { - for (body.instructions) |src_inst| { - _ = 
try self.analyzeInst(scope, src_inst); - } + for (body.instructions) |src_inst| { + src_inst.analyzed_inst = try self.analyzeInst(scope, src_inst); } } @@ -2118,7 +3074,7 @@ fn cmpNumeric( }; const casted_lhs = try self.coerce(scope, dest_type, lhs); const casted_rhs = try self.coerce(scope, dest_type, rhs); - return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, Inst.Args(Inst.Cmp){ + return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, .{ .lhs = casted_lhs, .rhs = casted_rhs, .op = op, @@ -2222,7 +3178,7 @@ fn cmpNumeric( const casted_lhs = try self.coerce(scope, dest_type, lhs); const casted_rhs = try self.coerce(scope, dest_type, lhs); - return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, Inst.Args(Inst.Cmp){ + return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, .{ .lhs = casted_lhs, .rhs = casted_rhs, .op = op, @@ -2299,7 +3255,7 @@ fn bitcast(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst { } // TODO validate the type size and other compile errors const b = try self.requireRuntimeBlock(scope, inst.src); - return self.addNewInstArgs(b, inst.src, dest_type, Inst.BitCast, Inst.Args(Inst.BitCast){ .operand = inst }); + return self.addNewInstArgs(b, inst.src, dest_type, Inst.BitCast, .{ .operand = inst }); } fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst { @@ -2316,6 +3272,30 @@ fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, a return self.failWithOwnedErrorMsg(scope, src, err_msg); } +fn failTok( + self: *Module, + scope: *Scope, + token_index: ast.TokenIndex, + comptime format: []const u8, + args: var, +) InnerError { + @setCold(true); + const src = scope.tree().token_locs[token_index].start; + return self.fail(scope, src, format, args); +} + +fn failNode( + self: *Module, + scope: *Scope, + ast_node: *ast.Node, + comptime format: []const u8, + args: var, +) InnerError { + @setCold(true); + const src = scope.tree().token_locs[ast_node.firstToken()].start; + return self.fail(scope, src, format, args); +} + fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *ErrorMsg) InnerError { { errdefer err_msg.destroy(self.allocator); @@ -2326,18 +3306,31 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Err .decl => { const decl = scope.cast(Scope.DeclAnalysis).?.decl; decl.analysis = .sema_failure; + decl.generation = self.generation; self.failed_decls.putAssumeCapacityNoClobber(decl, err_msg); }, .block => { const block = scope.cast(Scope.Block).?; - block.func.analysis = .sema_failure; + if (block.func) |func| { + func.analysis = .sema_failure; + } else { + block.decl.analysis = .sema_failure; + block.decl.generation = self.generation; + } self.failed_decls.putAssumeCapacityNoClobber(block.decl, err_msg); }, + .gen_zir => { + const gen_zir = scope.cast(Scope.GenZIR).?; + gen_zir.decl.analysis = .sema_failure; + gen_zir.decl.generation = self.generation; + self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg); + }, .zir_module => { const zir_module = scope.cast(Scope.ZIRModule).?; zir_module.status = .loaded_sema_failure; - self.failed_files.putAssumeCapacityNoClobber(zir_module, err_msg); + self.failed_files.putAssumeCapacityNoClobber(scope, err_msg); }, + .file => unreachable, } return error.AnalysisFail; } @@ -2385,3 +3378,7 @@ pub const ErrorMsg = struct { self.* = undefined; } }; + +fn srcHashEql(a: std.zig.SrcHash, b: std.zig.SrcHash) bool { + return @bitCast(u128, a) == @bitCast(u128, b); +} |
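
The incremental-update logic above hinges on srcHashEql: a Decl is only marked outdated (and re-queued as an analyze_decl work item) when its 16-byte contents hash changes between updates. The following is a minimal, self-contained sketch of that idea, written against the same era of Zig as the diff. The hashContents helper (an FNV-1a-128 stand-in) and the literal source strings are illustrative assumptions, not the compiler's actual hashing; only the @bitCast(u128, ...) equality check mirrors the srcHashEql added at the end of this change.

const std = @import("std");

/// Stand-in for std.zig.SrcHash: a 16-byte content hash per Decl.
const SrcHash = [16]u8;

/// Illustrative only: hash a decl's source bytes with FNV-1a (128-bit).
/// The real compiler computes contents_hash while scanning the file.
fn hashContents(contents: []const u8) SrcHash {
    var h: u128 = 0x6c62272e07bb014262b821756295c58d; // FNV-1a 128-bit offset basis
    for (contents) |byte| {
        h ^= byte;
        h *%= 0x1000000000000000000013b; // FNV-1a 128-bit prime
    }
    return @bitCast(SrcHash, h);
}

/// Mirrors srcHashEql above: two hashes are equal iff their bits are,
/// compared as a single u128.
fn srcHashEql(a: SrcHash, b: SrcHash) bool {
    return @bitCast(u128, a) == @bitCast(u128, b);
}

pub fn main() void {
    const old_hash = hashContents("fn foo() void {}");
    const new_hash = hashContents("fn foo() void { bar(); }");
    if (!srcHashEql(old_hash, new_hash)) {
        // This is the point where the compiler calls markOutdatedDecl,
        // which queues an analyze_decl work item, and then records the
        // new contents_hash on the Decl.
        std.debug.warn("decl contents changed; re-analysis needed\n", .{});
    }
}

Bit-casting both arrays to u128 turns the comparison into a single integer equality test, which is why the hash is kept at exactly 16 bytes; a changed hash only schedules re-analysis, while deletion of unreferenced Decls is still handled separately through deleted_decls and the deletion_set.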
