author    Andrew Kelley <andrew@ziglang.org>  2020-06-24 22:37:58 -0400
committer GitHub <noreply@github.com>         2020-06-24 22:37:58 -0400
commit    d337469e4484ffd160b4508e2366fefd435f6c8a (patch)
tree      7ad577eb66febb985ed029ae75c47461611775e2 /src-self-hosted
parent    7875649c2481f90b918581670c9268d6033f873f (diff)
parent    20b4a2cf2cded8904a57714ed2b90c857f12c6b1 (diff)
Merge pull request #5583 from ziglang/zig-ast-to-zir
self-hosted: hook up Zig AST to ZIR
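
In short: the Module's root scope is no longer hard-coded to a Scope.ZIRModule.
It is now a generic *Scope that is either a Scope.File (.zig source, lowered
through the new AST -> ZIR pass) or a Scope.ZIRModule (.zir source). A condensed
sketch of the dispatch, simplified from the patch below (the real code also
initializes the scope fields):

    // Module.init picks the root scope type from the file extension.
    const root_scope = blk: {
        if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zig")) {
            const file = try gpa.create(Scope.File); // .zig: parse AST, generate ZIR per Decl
            break :blk &file.base;
        } else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) {
            const zir_mod = try gpa.create(Scope.ZIRModule); // .zir: textual ZIR, analyzed directly
            break :blk &zir_mod.base;
        } else unreachable;
    };

    // Module.update then dispatches on the concrete scope type.
    if (self.root_scope.cast(Scope.File)) |zig_file| {
        try self.analyzeRootSrcFile(zig_file);
    } else if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| {
        try self.analyzeRootZIRModule(zir_module);
    }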
Diffstat (limited to 'src-self-hosted')
-rw-r--r--  src-self-hosted/Module.zig      1907
-rw-r--r--  src-self-hosted/TypedValue.zig     8
-rw-r--r--  src-self-hosted/codegen.zig       23
-rw-r--r--  src-self-hosted/ir.zig             9
-rw-r--r--  src-self-hosted/link.zig          25
-rw-r--r--  src-self-hosted/main.zig          46
-rw-r--r--  src-self-hosted/test.zig         302
-rw-r--r--  src-self-hosted/tracy.zig         45
-rw-r--r--  src-self-hosted/type.zig         103
-rw-r--r--  src-self-hosted/value.zig        124
-rw-r--r--  src-self-hosted/zir.zig          468
11 files changed, 2228 insertions, 832 deletions
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 4bcc30a65e..89dcac3f41 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -15,13 +15,16 @@ const ir = @import("ir.zig");
const zir = @import("zir.zig");
const Module = @This();
const Inst = ir.Inst;
+const ast = std.zig.ast;
+const trace = @import("tracy.zig").trace;
/// General-purpose allocator.
allocator: *Allocator,
/// Pointer to externally managed resource.
root_pkg: *Package,
/// Module owns this resource.
-root_scope: *Scope.ZIRModule,
+/// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`.
+root_scope: *Scope,
bin_file: link.ElfFile,
bin_file_dir: std.fs.Dir,
bin_file_path: []const u8,
@@ -35,10 +38,10 @@ decl_exports: std.AutoHashMap(*Decl, []*Export),
/// This table owns the Export memory.
export_owners: std.AutoHashMap(*Decl, []*Export),
/// Maps fully qualified namespaced names to the Decl struct for them.
-decl_table: std.AutoHashMap(Decl.Hash, *Decl),
+decl_table: DeclTable,
optimize_mode: std.builtin.Mode,
-link_error_flags: link.ElfFile.ErrorFlags = link.ElfFile.ErrorFlags{},
+link_error_flags: link.ElfFile.ErrorFlags = .{},
work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
@@ -49,8 +52,8 @@ work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
/// a Decl can have a failed_decls entry but have analysis status of success.
failed_decls: std.AutoHashMap(*Decl, *ErrorMsg),
/// Using a map here for consistency with the other fields here.
-/// The ErrorMsg memory is owned by the `Scope.ZIRModule`, using Module's allocator.
-failed_files: std.AutoHashMap(*Scope.ZIRModule, *ErrorMsg),
+/// The ErrorMsg memory is owned by the `Scope`, using Module's allocator.
+failed_files: std.AutoHashMap(*Scope, *ErrorMsg),
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Export`, using Module's allocator.
failed_exports: std.AutoHashMap(*Export, *ErrorMsg),
@@ -60,15 +63,23 @@ failed_exports: std.AutoHashMap(*Export, *ErrorMsg),
/// previous analysis.
generation: u32 = 0,
+next_anon_name_index: usize = 0,
+
/// Candidates for deletion. After a semantic analysis update completes, this list
/// contains Decls that need to be deleted if they end up having no references to them.
-deletion_set: std.ArrayListUnmanaged(*Decl) = std.ArrayListUnmanaged(*Decl){},
+deletion_set: std.ArrayListUnmanaged(*Decl) = .{},
+
+keep_source_files_loaded: bool,
+
+const DeclTable = std.HashMap(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql);
-pub const WorkItem = union(enum) {
+const WorkItem = union(enum) {
/// Write the machine code for a Decl to the output file.
codegen_decl: *Decl,
- /// Decl has been determined to be outdated; perform semantic analysis again.
- re_analyze_decl: *Decl,
+ /// The Decl needs to be analyzed and possibly export itself.
+ /// It may have already been analyzed, or it may have been determined
+ /// to be outdated; in this case perform semantic analysis again.
+ analyze_decl: *Decl,
};
pub const Export = struct {
@@ -99,13 +110,12 @@ pub const Decl = struct {
/// mapping them to an address in the output file.
/// Memory owned by this decl, using Module's allocator.
name: [*:0]const u8,
- /// The direct parent container of the Decl. This field will need to get more fleshed out when
- /// self-hosted supports proper struct types and Zig AST => ZIR.
+ /// The direct parent container of the Decl. This is either a `Scope.File` or `Scope.ZIRModule`.
/// Reference to externally owned memory.
- scope: *Scope.ZIRModule,
- /// Byte offset into the source file that contains this declaration.
- /// This is the base offset that src offsets within this Decl are relative to.
- src: usize,
+ scope: *Scope,
+ /// The AST Node decl index or ZIR Inst index that contains this declaration.
+ /// Must be recomputed when the corresponding source file is modified.
+ src_index: usize,
/// The most recent value of the Decl after a successful semantic analysis.
typed_value: union(enum) {
never_succeeded: void,
@@ -116,6 +126,9 @@ pub const Decl = struct {
/// analysis of the function body is performed with this value set to `success`. Functions
/// have their own analysis status field.
analysis: enum {
+ /// This Decl corresponds to an AST Node that has not been referenced yet, and therefore
+ /// because of Zig's lazy declaration analysis, it will remain unanalyzed until referenced.
+ unreferenced,
/// Semantic analysis for this Decl is running right now. This state detects dependency loops.
in_progress,
/// This Decl might be OK but it depends on another one which did not successfully complete
@@ -125,6 +138,10 @@ pub const Decl = struct {
/// There will be a corresponding ErrorMsg in Module.failed_decls.
sema_failure,
/// There will be a corresponding ErrorMsg in Module.failed_decls.
+ /// This indicates the failure was something like running out of disk space,
+ /// and attempting semantic analysis again may succeed.
+ sema_failure_retryable,
+ /// There will be a corresponding ErrorMsg in Module.failed_decls.
codegen_failure,
/// There will be a corresponding ErrorMsg in Module.failed_decls.
/// This indicates the failure was something like running out of disk space,
@@ -150,7 +167,7 @@ pub const Decl = struct {
/// This is populated regardless of semantic analysis and code generation.
link: link.ElfFile.TextBlock = link.ElfFile.TextBlock.empty,
- contents_hash: Hash,
+ contents_hash: std.zig.SrcHash,
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
@@ -169,28 +186,28 @@ pub const Decl = struct {
allocator.destroy(self);
}
- pub const Hash = [16]u8;
-
- /// If the name is small enough, it is used directly as the hash.
- /// If it is long, blake3 hash is computed.
- pub fn hashSimpleName(name: []const u8) Hash {
- var out: Hash = undefined;
- if (name.len <= Hash.len) {
- mem.copy(u8, &out, name);
- mem.set(u8, out[name.len..], 0);
- } else {
- std.crypto.Blake3.hash(name, &out);
+ pub fn src(self: Decl) usize {
+ switch (self.scope.tag) {
+ .file => {
+ const file = @fieldParentPtr(Scope.File, "base", self.scope);
+ const tree = file.contents.tree;
+ const decl_node = tree.root_node.decls()[self.src_index];
+ return tree.token_locs[decl_node.firstToken()].start;
+ },
+ .zir_module => {
+ const zir_module = @fieldParentPtr(Scope.ZIRModule, "base", self.scope);
+ const module = zir_module.contents.module;
+ const src_decl = module.decls[self.src_index];
+ return src_decl.inst.src;
+ },
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .decl => unreachable,
}
- return out;
}
- /// Must generate unique bytes with no collisions with other decls.
- /// The point of hashing here is only to limit the number of bytes of
- /// the unique identifier to a fixed size (16 bytes).
- pub fn fullyQualifiedNameHash(self: Decl) Hash {
- // Right now we only have ZIRModule as the source. So this is simply the
- // relative name of the decl.
- return hashSimpleName(mem.spanZ(self.name));
+ pub fn fullyQualifiedNameHash(self: Decl) Scope.NameHash {
+ return self.scope.fullyQualifiedNameHash(mem.spanZ(self.name));
}
pub fn typedValue(self: *Decl) error{AnalysisFail}!TypedValue {
@@ -248,11 +265,9 @@ pub const Decl = struct {
/// Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
pub const Fn = struct {
/// This memory owned by the Decl's TypedValue.Managed arena allocator.
- fn_type: Type,
analysis: union(enum) {
- /// The value is the source instruction.
- queued: *zir.Inst.Fn,
- in_progress: *Analysis,
+ queued: *ZIR,
+ in_progress,
/// There will be a corresponding ErrorMsg in Module.failed_decls
sema_failure,
/// This Fn might be OK but it depends on another Decl which did not successfully complete
@@ -266,16 +281,20 @@ pub const Fn = struct {
/// of Fn analysis.
pub const Analysis = struct {
inner_block: Scope.Block,
- /// TODO Performance optimization idea: instead of this inst_table,
- /// use a field in the zir.Inst instead to track corresponding instructions
- inst_table: std.AutoHashMap(*zir.Inst, *Inst),
- needed_inst_capacity: usize,
+ };
+
+ /// Contains un-analyzed ZIR instructions generated from Zig source AST.
+ pub const ZIR = struct {
+ body: zir.Module.Body,
+ arena: std.heap.ArenaAllocator.State,
};
};
pub const Scope = struct {
tag: Tag,
+ pub const NameHash = [16]u8;
+
pub fn cast(base: *Scope, comptime T: type) ?*T {
if (base.tag != T.base_tag)
return null;
@@ -289,7 +308,9 @@ pub const Scope = struct {
switch (self.tag) {
.block => return self.cast(Block).?.arena,
.decl => return &self.cast(DeclAnalysis).?.arena.allocator,
+ .gen_zir => return &self.cast(GenZIR).?.arena.allocator,
.zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator,
+ .file => unreachable,
}
}
@@ -298,18 +319,45 @@ pub const Scope = struct {
pub fn decl(self: *Scope) ?*Decl {
return switch (self.tag) {
.block => self.cast(Block).?.decl,
+ .gen_zir => self.cast(GenZIR).?.decl,
.decl => self.cast(DeclAnalysis).?.decl,
.zir_module => null,
+ .file => null,
};
}
- /// Asserts the scope has a parent which is a ZIRModule and
+ /// Asserts the scope has a parent which is a ZIRModule or File and
/// returns it.
- pub fn namespace(self: *Scope) *ZIRModule {
+ pub fn namespace(self: *Scope) *Scope {
switch (self.tag) {
.block => return self.cast(Block).?.decl.scope,
+ .gen_zir => return self.cast(GenZIR).?.decl.scope,
.decl => return self.cast(DeclAnalysis).?.decl.scope,
- .zir_module => return self.cast(ZIRModule).?,
+ .zir_module, .file => return self,
+ }
+ }
+
+ /// Must generate unique bytes with no collisions with other decls.
+ /// The point of hashing here is only to limit the number of bytes of
+ /// the unique identifier to a fixed size (16 bytes).
+ pub fn fullyQualifiedNameHash(self: *Scope, name: []const u8) NameHash {
+ switch (self.tag) {
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .decl => unreachable,
+ .zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name),
+ .file => return self.cast(File).?.fullyQualifiedNameHash(name),
+ }
+ }
+
+ /// Asserts the scope is a child of a File and has an AST tree and returns the tree.
+ pub fn tree(self: *Scope) *ast.Tree {
+ switch (self.tag) {
+ .file => return self.cast(File).?.contents.tree,
+ .zir_module => unreachable,
+ .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(File).?.contents.tree,
+ .block => return self.cast(Block).?.decl.scope.cast(File).?.contents.tree,
+ .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(File).?.contents.tree,
}
}
@@ -325,10 +373,173 @@ pub const Scope = struct {
});
}
+ /// Asserts the scope has a parent which is a ZIRModule or File and
+ /// returns the sub_file_path field.
+ pub fn subFilePath(base: *Scope) []const u8 {
+ switch (base.tag) {
+ .file => return @fieldParentPtr(File, "base", base).sub_file_path,
+ .zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path,
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .decl => unreachable,
+ }
+ }
+
+ pub fn unload(base: *Scope, allocator: *Allocator) void {
+ switch (base.tag) {
+ .file => return @fieldParentPtr(File, "base", base).unload(allocator),
+ .zir_module => return @fieldParentPtr(ZIRModule, "base", base).unload(allocator),
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .decl => unreachable,
+ }
+ }
+
+ pub fn getSource(base: *Scope, module: *Module) ![:0]const u8 {
+ switch (base.tag) {
+ .file => return @fieldParentPtr(File, "base", base).getSource(module),
+ .zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module),
+ .gen_zir => unreachable,
+ .block => unreachable,
+ .decl => unreachable,
+ }
+ }
+
+ /// Asserts the scope is a namespace Scope and removes the Decl from the namespace.
+ pub fn removeDecl(base: *Scope, child: *Decl) void {
+ switch (base.tag) {
+ .file => return @fieldParentPtr(File, "base", base).removeDecl(child),
+ .zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child),
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .decl => unreachable,
+ }
+ }
+
+ /// Asserts the scope is a File or ZIRModule and deinitializes it, then deallocates it.
+ pub fn destroy(base: *Scope, allocator: *Allocator) void {
+ switch (base.tag) {
+ .file => {
+ const scope_file = @fieldParentPtr(File, "base", base);
+ scope_file.deinit(allocator);
+ allocator.destroy(scope_file);
+ },
+ .zir_module => {
+ const scope_zir_module = @fieldParentPtr(ZIRModule, "base", base);
+ scope_zir_module.deinit(allocator);
+ allocator.destroy(scope_zir_module);
+ },
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .decl => unreachable,
+ }
+ }
+
+ fn name_hash_hash(x: NameHash) u32 {
+ return @truncate(u32, @bitCast(u128, x));
+ }
+
+ fn name_hash_eql(a: NameHash, b: NameHash) bool {
+ return @bitCast(u128, a) == @bitCast(u128, b);
+ }
+
pub const Tag = enum {
+ /// .zir source code.
zir_module,
+ /// .zig source code.
+ file,
block,
decl,
+ gen_zir,
+ };
+
+ pub const File = struct {
+ pub const base_tag: Tag = .file;
+ base: Scope = Scope{ .tag = base_tag },
+
+ /// Relative to the owning package's root_src_dir.
+ /// Reference to external memory, not owned by File.
+ sub_file_path: []const u8,
+ source: union(enum) {
+ unloaded: void,
+ bytes: [:0]const u8,
+ },
+ contents: union {
+ not_available: void,
+ tree: *ast.Tree,
+ },
+ status: enum {
+ never_loaded,
+ unloaded_success,
+ unloaded_parse_failure,
+ loaded_success,
+ },
+
+ /// Direct children of the file.
+ decls: ArrayListUnmanaged(*Decl),
+
+ pub fn unload(self: *File, allocator: *Allocator) void {
+ switch (self.status) {
+ .never_loaded,
+ .unloaded_parse_failure,
+ .unloaded_success,
+ => {},
+
+ .loaded_success => {
+ self.contents.tree.deinit();
+ self.status = .unloaded_success;
+ },
+ }
+ switch (self.source) {
+ .bytes => |bytes| {
+ allocator.free(bytes);
+ self.source = .{ .unloaded = {} };
+ },
+ .unloaded => {},
+ }
+ }
+
+ pub fn deinit(self: *File, allocator: *Allocator) void {
+ self.decls.deinit(allocator);
+ self.unload(allocator);
+ self.* = undefined;
+ }
+
+ pub fn removeDecl(self: *File, child: *Decl) void {
+ for (self.decls.items) |item, i| {
+ if (item == child) {
+ _ = self.decls.swapRemove(i);
+ return;
+ }
+ }
+ }
+
+ pub fn dumpSrc(self: *File, src: usize) void {
+ const loc = std.zig.findLineColumn(self.source.bytes, src);
+ std.debug.warn("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
+ }
+
+ pub fn getSource(self: *File, module: *Module) ![:0]const u8 {
+ switch (self.source) {
+ .unloaded => {
+ const source = try module.root_pkg.root_src_dir.readFileAllocOptions(
+ module.allocator,
+ self.sub_file_path,
+ std.math.maxInt(u32),
+ 1,
+ 0,
+ );
+ self.source = .{ .bytes = source };
+ return source;
+ },
+ .bytes => |bytes| return bytes,
+ }
+ }
+
+ pub fn fullyQualifiedNameHash(self: *File, name: []const u8) NameHash {
+ // We don't have struct scopes yet so this is currently just a simple name hash.
+ return std.zig.hashSrc(name);
+ }
};
pub const ZIRModule = struct {
@@ -355,6 +566,11 @@ pub const Scope = struct {
loaded_success,
},
+ /// Even though .zir files only have 1 module, this set is still needed
+ /// because of anonymous Decls, which can exist in the global set, but
+ /// not this one.
+ decls: ArrayListUnmanaged(*Decl),
+
pub fn unload(self: *ZIRModule, allocator: *Allocator) void {
switch (self.status) {
.never_loaded,
@@ -366,11 +582,13 @@ pub const Scope = struct {
.loaded_success => {
self.contents.module.deinit(allocator);
allocator.destroy(self.contents.module);
+ self.contents = .{ .not_available = {} };
self.status = .unloaded_success;
},
.loaded_sema_failure => {
self.contents.module.deinit(allocator);
allocator.destroy(self.contents.module);
+ self.contents = .{ .not_available = {} };
self.status = .unloaded_sema_failure;
},
}
@@ -384,14 +602,46 @@ pub const Scope = struct {
}
pub fn deinit(self: *ZIRModule, allocator: *Allocator) void {
+ self.decls.deinit(allocator);
self.unload(allocator);
self.* = undefined;
}
+ pub fn removeDecl(self: *ZIRModule, child: *Decl) void {
+ for (self.decls.items) |item, i| {
+ if (item == child) {
+ _ = self.decls.swapRemove(i);
+ return;
+ }
+ }
+ }
+
pub fn dumpSrc(self: *ZIRModule, src: usize) void {
const loc = std.zig.findLineColumn(self.source.bytes, src);
std.debug.warn("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
}
+
+ pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 {
+ switch (self.source) {
+ .unloaded => {
+ const source = try module.root_pkg.root_src_dir.readFileAllocOptions(
+ module.allocator,
+ self.sub_file_path,
+ std.math.maxInt(u32),
+ 1,
+ 0,
+ );
+ self.source = .{ .bytes = source };
+ return source;
+ },
+ .bytes => |bytes| return bytes,
+ }
+ }
+
+ pub fn fullyQualifiedNameHash(self: *ZIRModule, name: []const u8) NameHash {
+ // ZIR modules only have 1 file with all decls global in the same namespace.
+ return std.zig.hashSrc(name);
+ }
};
/// This is a temporary structure, references to it are valid only
@@ -399,7 +649,7 @@ pub const Scope = struct {
pub const Block = struct {
pub const base_tag: Tag = .block;
base: Scope = Scope{ .tag = base_tag },
- func: *Fn,
+ func: ?*Fn,
decl: *Decl,
instructions: ArrayListUnmanaged(*Inst),
/// Points to the arena allocator of DeclAnalysis
@@ -414,6 +664,16 @@ pub const Scope = struct {
decl: *Decl,
arena: std.heap.ArenaAllocator,
};
+
+ /// This is a temporary structure, references to it are valid only
+ /// during semantic analysis of the decl.
+ pub const GenZIR = struct {
+ pub const base_tag: Tag = .gen_zir;
+ base: Scope = Scope{ .tag = base_tag },
+ decl: *Decl,
+ arena: std.heap.ArenaAllocator,
+ instructions: std.ArrayList(*zir.Inst),
+ };
};
pub const Body = struct {
@@ -463,19 +723,10 @@ pub const InitOptions = struct {
link_mode: ?std.builtin.LinkMode = null,
object_format: ?std.builtin.ObjectFormat = null,
optimize_mode: std.builtin.Mode = .Debug,
+ keep_source_files_loaded: bool = false,
};
pub fn init(gpa: *Allocator, options: InitOptions) !Module {
- const root_scope = try gpa.create(Scope.ZIRModule);
- errdefer gpa.destroy(root_scope);
-
- root_scope.* = .{
- .sub_file_path = options.root_pkg.root_src_path,
- .source = .{ .unloaded = {} },
- .contents = .{ .not_available = {} },
- .status = .never_loaded,
- };
-
const bin_file_dir = options.bin_file_dir orelse std.fs.cwd();
var bin_file = try link.openBinFilePath(gpa, bin_file_dir, options.bin_file_path, .{
.target = options.target,
@@ -485,6 +736,32 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
});
errdefer bin_file.deinit();
+ const root_scope = blk: {
+ if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zig")) {
+ const root_scope = try gpa.create(Scope.File);
+ root_scope.* = .{
+ .sub_file_path = options.root_pkg.root_src_path,
+ .source = .{ .unloaded = {} },
+ .contents = .{ .not_available = {} },
+ .status = .never_loaded,
+ .decls = .{},
+ };
+ break :blk &root_scope.base;
+ } else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) {
+ const root_scope = try gpa.create(Scope.ZIRModule);
+ root_scope.* = .{
+ .sub_file_path = options.root_pkg.root_src_path,
+ .source = .{ .unloaded = {} },
+ .contents = .{ .not_available = {} },
+ .status = .never_loaded,
+ .decls = .{},
+ };
+ break :blk &root_scope.base;
+ } else {
+ unreachable;
+ }
+ };
+
return Module{
.allocator = gpa,
.root_pkg = options.root_pkg,
@@ -493,13 +770,14 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
.bin_file_path = options.bin_file_path,
.bin_file = bin_file,
.optimize_mode = options.optimize_mode,
- .decl_table = std.AutoHashMap(Decl.Hash, *Decl).init(gpa),
+ .decl_table = DeclTable.init(gpa),
.decl_exports = std.AutoHashMap(*Decl, []*Export).init(gpa),
.export_owners = std.AutoHashMap(*Decl, []*Export).init(gpa),
.failed_decls = std.AutoHashMap(*Decl, *ErrorMsg).init(gpa),
- .failed_files = std.AutoHashMap(*Scope.ZIRModule, *ErrorMsg).init(gpa),
+ .failed_files = std.AutoHashMap(*Scope, *ErrorMsg).init(gpa),
.failed_exports = std.AutoHashMap(*Export, *ErrorMsg).init(gpa),
.work_queue = std.fifo.LinearFifo(WorkItem, .Dynamic).init(gpa),
+ .keep_source_files_loaded = options.keep_source_files_loaded,
};
}
@@ -551,10 +829,7 @@ pub fn deinit(self: *Module) void {
}
self.export_owners.deinit();
}
- {
- self.root_scope.deinit(allocator);
- allocator.destroy(self.root_scope);
- }
+ self.root_scope.destroy(allocator);
self.* = undefined;
}
@@ -571,19 +846,31 @@ pub fn target(self: Module) std.Target {
/// Detect changes to source files, perform semantic analysis, and update the output files.
pub fn update(self: *Module) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
self.generation += 1;
// TODO Use the cache hash file system to detect which source files changed.
- // Here we simulate a full cache miss.
- // Analyze the root source file now.
- // Source files could have been loaded for any reason; to force a refresh we unload now.
- self.root_scope.unload(self.allocator);
- self.analyzeRoot(self.root_scope) catch |err| switch (err) {
- error.AnalysisFail => {
- assert(self.totalErrorCount() != 0);
- },
- else => |e| return e,
- };
+ // Until then we simulate a full cache miss. Source files could have been loaded for any reason;
+ // to force a refresh we unload now.
+ if (self.root_scope.cast(Scope.File)) |zig_file| {
+ zig_file.unload(self.allocator);
+ self.analyzeRootSrcFile(zig_file) catch |err| switch (err) {
+ error.AnalysisFail => {
+ assert(self.totalErrorCount() != 0);
+ },
+ else => |e| return e,
+ };
+ } else if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| {
+ zir_module.unload(self.allocator);
+ self.analyzeRootZIRModule(zir_module) catch |err| switch (err) {
+ error.AnalysisFail => {
+ assert(self.totalErrorCount() != 0);
+ },
+ else => |e| return e,
+ };
+ }
try self.performAllTheWork();
@@ -596,14 +883,16 @@ pub fn update(self: *Module) !void {
try self.deleteDecl(decl);
}
+ self.link_error_flags = self.bin_file.error_flags;
+
// If there are any errors, we anticipate the source files being loaded
// to report error messages. Otherwise we unload all source files to save memory.
if (self.totalErrorCount() == 0) {
- self.root_scope.unload(self.allocator);
+ if (!self.keep_source_files_loaded) {
+ self.root_scope.unload(self.allocator);
+ }
+ try self.bin_file.flush();
}
-
- try self.bin_file.flush();
- self.link_error_flags = self.bin_file.error_flags;
}
/// Having the file open for writing is problematic as far as executing the
@@ -619,10 +908,10 @@ pub fn makeBinFileWritable(self: *Module) !void {
}
pub fn totalErrorCount(self: *Module) usize {
- return self.failed_decls.size +
+ const total = self.failed_decls.size +
self.failed_files.size +
- self.failed_exports.size +
- @boolToInt(self.link_error_flags.no_entry_point_found);
+ self.failed_exports.size;
+ return if (total == 0) @boolToInt(self.link_error_flags.no_entry_point_found) else total;
}
pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
@@ -637,8 +926,8 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
while (it.next()) |kv| {
const scope = kv.key;
const err_msg = kv.value;
- const source = try self.getSource(scope);
- try AllErrors.add(&arena, &errors, scope.sub_file_path, source, err_msg.*);
+ const source = try scope.getSource(self);
+ try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*);
}
}
{
@@ -646,8 +935,8 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
while (it.next()) |kv| {
const decl = kv.key;
const err_msg = kv.value;
- const source = try self.getSource(decl.scope);
- try AllErrors.add(&arena, &errors, decl.scope.sub_file_path, source, err_msg.*);
+ const source = try decl.scope.getSource(self);
+ try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
}
}
{
@@ -655,12 +944,12 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
while (it.next()) |kv| {
const decl = kv.key.owner_decl;
const err_msg = kv.value;
- const source = try self.getSource(decl.scope);
- try AllErrors.add(&arena, &errors, decl.scope.sub_file_path, source, err_msg.*);
+ const source = try decl.scope.getSource(self);
+ try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
}
}
- if (self.link_error_flags.no_entry_point_found) {
+ if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) {
try errors.append(.{
.src_path = self.root_pkg.root_src_path,
.line = 0,
@@ -683,12 +972,14 @@ const InnerError = error{ OutOfMemory, AnalysisFail };
pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
while (self.work_queue.readItem()) |work_item| switch (work_item) {
.codegen_decl => |decl| switch (decl.analysis) {
+ .unreferenced => unreachable,
.in_progress => unreachable,
.outdated => unreachable,
.sema_failure,
.codegen_failure,
.dependency_failure,
+ .sema_failure_retryable,
=> continue,
.complete, .codegen_failure_retryable => {
@@ -696,12 +987,10 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
switch (payload.func.analysis) {
.queued => self.analyzeFnBody(decl, payload.func) catch |err| switch (err) {
error.AnalysisFail => {
- if (payload.func.analysis == .queued) {
- payload.func.analysis = .dependency_failure;
- }
+ assert(payload.func.analysis != .in_progress);
continue;
},
- else => |e| return e,
+ error.OutOfMemory => return error.OutOfMemory,
},
.in_progress => unreachable,
.sema_failure, .dependency_failure => continue,
@@ -720,7 +1009,7 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
self.allocator,
- decl.src,
+ decl.src(),
"unable to codegen: {}",
.{@errorName(err)},
));
@@ -729,41 +1018,560 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
};
},
},
- .re_analyze_decl => |decl| switch (decl.analysis) {
- .in_progress => unreachable,
+ .analyze_decl => |decl| {
+ self.ensureDeclAnalyzed(decl) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => continue,
+ };
+ },
+ };
+}
- .sema_failure,
- .codegen_failure,
- .dependency_failure,
- .complete,
- .codegen_failure_retryable,
- => continue,
+fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
- .outdated => {
- const zir_module = self.getSrcModule(decl.scope) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => {
- try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
- self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
- self.allocator,
- decl.src,
- "unable to load source file '{}': {}",
- .{ decl.scope.sub_file_path, @errorName(err) },
- ));
- decl.analysis = .codegen_failure_retryable;
- continue;
+ const subsequent_analysis = switch (decl.analysis) {
+ .in_progress => unreachable,
+
+ .sema_failure,
+ .sema_failure_retryable,
+ .codegen_failure,
+ .dependency_failure,
+ .codegen_failure_retryable,
+ => return error.AnalysisFail,
+
+ .complete, .outdated => blk: {
+ if (decl.generation == self.generation) {
+ assert(decl.analysis == .complete);
+ return;
+ }
+ //std.debug.warn("re-analyzing {}\n", .{decl.name});
+
+ // The exports this Decl performs will be re-discovered, so we remove them here
+ // prior to re-analysis.
+ self.deleteDeclExports(decl);
+ // Dependencies will be re-discovered, so we remove them here prior to re-analysis.
+ for (decl.dependencies.items) |dep| {
+ dep.removeDependant(decl);
+ if (dep.dependants.items.len == 0 and !dep.deletion_flag) {
+ // We don't perform a deletion here, because this Decl or another one
+ // may end up referencing it before the update is complete.
+ dep.deletion_flag = true;
+ try self.deletion_set.append(self.allocator, dep);
+ }
+ }
+ decl.dependencies.shrink(self.allocator, 0);
+
+ break :blk true;
+ },
+
+ .unreferenced => false,
+ };
+
+ const type_changed = if (self.root_scope.cast(Scope.ZIRModule)) |zir_module|
+ try self.analyzeZirDecl(decl, zir_module.contents.module.decls[decl.src_index])
+ else
+ self.astGenAndAnalyzeDecl(decl) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => return error.AnalysisFail,
+ else => {
+ try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
+ self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
+ self.allocator,
+ decl.src(),
+ "unable to analyze: {}",
+ .{@errorName(err)},
+ ));
+ decl.analysis = .sema_failure_retryable;
+ return error.AnalysisFail;
+ },
+ };
+
+ if (subsequent_analysis) {
+ // We may need to chase the dependants and re-analyze them.
+ // However, if the decl is a function, and the type is the same, we do not need to.
+ if (type_changed or decl.typed_value.most_recent.typed_value.val.tag() != .function) {
+ for (decl.dependants.items) |dep| {
+ switch (dep.analysis) {
+ .unreferenced => unreachable,
+ .in_progress => unreachable,
+ .outdated => continue, // already queued for update
+
+ .dependency_failure,
+ .sema_failure,
+ .sema_failure_retryable,
+ .codegen_failure,
+ .codegen_failure_retryable,
+ .complete,
+ => if (dep.generation != self.generation) {
+ try self.markOutdatedDecl(dep);
},
+ }
+ }
+ }
+ }
+}
+
+fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const file_scope = decl.scope.cast(Scope.File).?;
+ const tree = try self.getAstTree(file_scope);
+ const ast_node = tree.root_node.decls()[decl.src_index];
+ switch (ast_node.id) {
+ .FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", ast_node);
+
+ decl.analysis = .in_progress;
+
+ // This arena allocator's memory is discarded at the end of this function. It is used
+ // to determine the type of the function, and hence the type of the decl, which is needed
+ // to complete the Decl analysis.
+ var fn_type_scope: Scope.GenZIR = .{
+ .decl = decl,
+ .arena = std.heap.ArenaAllocator.init(self.allocator),
+ .instructions = std.ArrayList(*zir.Inst).init(self.allocator),
+ };
+ defer fn_type_scope.arena.deinit();
+ defer fn_type_scope.instructions.deinit();
+
+ const body_node = fn_proto.body_node orelse
+ return self.failTok(&fn_type_scope.base, fn_proto.fn_token, "TODO implement extern functions", .{});
+ if (fn_proto.params_len != 0) {
+ return self.failTok(
+ &fn_type_scope.base,
+ fn_proto.params()[0].name_token.?,
+ "TODO implement function parameters",
+ .{},
+ );
+ }
+ if (fn_proto.lib_name) |lib_name| {
+ return self.failNode(&fn_type_scope.base, lib_name, "TODO implement function library name", .{});
+ }
+ if (fn_proto.align_expr) |align_expr| {
+ return self.failNode(&fn_type_scope.base, align_expr, "TODO implement function align expression", .{});
+ }
+ if (fn_proto.section_expr) |sect_expr| {
+ return self.failNode(&fn_type_scope.base, sect_expr, "TODO implement function section expression", .{});
+ }
+ if (fn_proto.callconv_expr) |callconv_expr| {
+ return self.failNode(
+ &fn_type_scope.base,
+ callconv_expr,
+ "TODO implement function calling convention expression",
+ .{},
+ );
+ }
+ const return_type_expr = switch (fn_proto.return_type) {
+ .Explicit => |node| node,
+ .InferErrorSet => |node| return self.failNode(&fn_type_scope.base, node, "TODO implement inferred error sets", .{}),
+ .Invalid => |tok| return self.failTok(&fn_type_scope.base, tok, "unable to parse return type", .{}),
+ };
+
+ const return_type_inst = try self.astGenExpr(&fn_type_scope.base, return_type_expr);
+ const fn_src = tree.token_locs[fn_proto.fn_token].start;
+ const fn_type_inst = try self.addZIRInst(&fn_type_scope.base, fn_src, zir.Inst.FnType, .{
+ .return_type = return_type_inst,
+ .param_types = &[0]*zir.Inst{},
+ }, .{});
+ _ = try self.addZIRInst(&fn_type_scope.base, fn_src, zir.Inst.Return, .{ .operand = fn_type_inst }, .{});
+
+ // We need the memory for the Type to go into the arena for the Decl
+ var decl_arena = std.heap.ArenaAllocator.init(self.allocator);
+ errdefer decl_arena.deinit();
+ const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
+
+ var block_scope: Scope.Block = .{
+ .func = null,
+ .decl = decl,
+ .instructions = .{},
+ .arena = &decl_arena.allocator,
+ };
+ defer block_scope.instructions.deinit(self.allocator);
+
+ const fn_type = try self.analyzeBodyValueAsType(&block_scope, .{
+ .instructions = fn_type_scope.instructions.items,
+ });
+ const new_func = try decl_arena.allocator.create(Fn);
+ const fn_payload = try decl_arena.allocator.create(Value.Payload.Function);
+
+ const fn_zir = blk: {
+ // This scope's arena memory is discarded after the ZIR generation
+ // pass completes, and semantic analysis of it completes.
+ var gen_scope: Scope.GenZIR = .{
+ .decl = decl,
+ .arena = std.heap.ArenaAllocator.init(self.allocator),
+ .instructions = std.ArrayList(*zir.Inst).init(self.allocator),
};
- const decl_name = mem.spanZ(decl.name);
- // We already detected deletions, so we know this will be found.
- const src_decl = zir_module.findDecl(decl_name).?;
- self.reAnalyzeDecl(decl, src_decl) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.AnalysisFail => continue,
+ errdefer gen_scope.arena.deinit();
+ defer gen_scope.instructions.deinit();
+
+ const body_block = body_node.cast(ast.Node.Block).?;
+
+ try self.astGenBlock(&gen_scope.base, body_block);
+
+ const fn_zir = try gen_scope.arena.allocator.create(Fn.ZIR);
+ fn_zir.* = .{
+ .body = .{
+ .instructions = try gen_scope.arena.allocator.dupe(*zir.Inst, gen_scope.instructions.items),
+ },
+ .arena = gen_scope.arena.state,
};
- },
+ break :blk fn_zir;
+ };
+
+ new_func.* = .{
+ .analysis = .{ .queued = fn_zir },
+ .owner_decl = decl,
+ };
+ fn_payload.* = .{ .func = new_func };
+
+ var prev_type_has_bits = false;
+ var type_changed = true;
+
+ if (decl.typedValueManaged()) |tvm| {
+ prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits();
+ type_changed = !tvm.typed_value.ty.eql(fn_type);
+
+ tvm.deinit(self.allocator);
+ }
+
+ decl_arena_state.* = decl_arena.state;
+ decl.typed_value = .{
+ .most_recent = .{
+ .typed_value = .{
+ .ty = fn_type,
+ .val = Value.initPayload(&fn_payload.base),
+ },
+ .arena = decl_arena_state,
+ },
+ };
+ decl.analysis = .complete;
+ decl.generation = self.generation;
+
+ if (fn_type.hasCodeGenBits()) {
+ // We don't fully codegen the decl until later, but we do need to reserve a global
+ // offset table index for it. This allows us to codegen decls out of dependency order,
+ // increasing how many computations can be done in parallel.
+ try self.bin_file.allocateDeclIndexes(decl);
+ try self.work_queue.writeItem(.{ .codegen_decl = decl });
+ } else if (prev_type_has_bits) {
+ self.bin_file.freeDecl(decl);
+ }
+
+ if (fn_proto.extern_export_inline_token) |maybe_export_token| {
+ if (tree.token_ids[maybe_export_token] == .Keyword_export) {
+ const export_src = tree.token_locs[maybe_export_token].start;
+ const name_loc = tree.token_locs[fn_proto.name_token.?];
+ const name = tree.tokenSliceLoc(name_loc);
+ // The scope needs to have the decl in it.
+ try self.analyzeExport(&block_scope.base, export_src, name, decl);
+ }
+ }
+ return type_changed;
},
+ .VarDecl => @panic("TODO var decl"),
+ .Comptime => @panic("TODO comptime decl"),
+ .Use => @panic("TODO usingnamespace decl"),
+ else => unreachable,
+ }
+}
+
+fn analyzeBodyValueAsType(self: *Module, block_scope: *Scope.Block, body: zir.Module.Body) !Type {
+ try self.analyzeBody(&block_scope.base, body);
+ for (block_scope.instructions.items) |inst| {
+ if (inst.cast(Inst.Ret)) |ret| {
+ const val = try self.resolveConstValue(&block_scope.base, ret.args.operand);
+ return val.toType();
+ } else {
+ return self.fail(&block_scope.base, inst.src, "unable to resolve comptime value", .{});
+ }
+ }
+ unreachable;
+}
+
+fn astGenExpr(self: *Module, scope: *Scope, ast_node: *ast.Node) InnerError!*zir.Inst {
+ switch (ast_node.id) {
+ .Identifier => return self.astGenIdent(scope, @fieldParentPtr(ast.Node.Identifier, "base", ast_node)),
+ .Asm => return self.astGenAsm(scope, @fieldParentPtr(ast.Node.Asm, "base", ast_node)),
+ .StringLiteral => return self.astGenStringLiteral(scope, @fieldParentPtr(ast.Node.StringLiteral, "base", ast_node)),
+ .IntegerLiteral => return self.astGenIntegerLiteral(scope, @fieldParentPtr(ast.Node.IntegerLiteral, "base", ast_node)),
+ .BuiltinCall => return self.astGenBuiltinCall(scope, @fieldParentPtr(ast.Node.BuiltinCall, "base", ast_node)),
+ .Call => return self.astGenCall(scope, @fieldParentPtr(ast.Node.Call, "base", ast_node)),
+ .Unreachable => return self.astGenUnreachable(scope, @fieldParentPtr(ast.Node.Unreachable, "base", ast_node)),
+ .ControlFlowExpression => return self.astGenControlFlowExpression(scope, @fieldParentPtr(ast.Node.ControlFlowExpression, "base", ast_node)),
+ else => return self.failNode(scope, ast_node, "TODO implement astGenExpr for {}", .{@tagName(ast_node.id)}),
+ }
+}
+
+fn astGenControlFlowExpression(
+ self: *Module,
+ scope: *Scope,
+ cfe: *ast.Node.ControlFlowExpression,
+) InnerError!*zir.Inst {
+ switch (cfe.kind) {
+ .Break => return self.failNode(scope, &cfe.base, "TODO implement astGenExpr for Break", .{}),
+ .Continue => return self.failNode(scope, &cfe.base, "TODO implement astGenExpr for Continue", .{}),
+ .Return => {},
+ }
+ const tree = scope.tree();
+ const src = tree.token_locs[cfe.ltoken].start;
+ if (cfe.rhs) |rhs_node| {
+ const operand = try self.astGenExpr(scope, rhs_node);
+ return self.addZIRInst(scope, src, zir.Inst.Return, .{ .operand = operand }, .{});
+ } else {
+ return self.addZIRInst(scope, src, zir.Inst.ReturnVoid, .{}, .{});
+ }
+}
+
+fn astGenIdent(self: *Module, scope: *Scope, ident: *ast.Node.Identifier) InnerError!*zir.Inst {
+ const tree = scope.tree();
+ const ident_name = tree.tokenSlice(ident.token);
+ if (mem.eql(u8, ident_name, "_")) {
+ return self.failNode(scope, &ident.base, "TODO implement '_' identifier", .{});
+ }
+
+ if (getSimplePrimitiveValue(ident_name)) |typed_value| {
+ const src = tree.token_locs[ident.token].start;
+ return self.addZIRInstConst(scope, src, typed_value);
+ }
+
+ if (ident_name.len >= 2) integer: {
+ const first_c = ident_name[0];
+ if (first_c == 'i' or first_c == 'u') {
+ const is_signed = first_c == 'i';
+ const bit_count = std.fmt.parseInt(u16, ident_name[1..], 10) catch |err| switch (err) {
+ error.Overflow => return self.failNode(
+ scope,
+ &ident.base,
+ "primitive integer type '{}' exceeds maximum bit width of 65535",
+ .{ident_name},
+ ),
+ error.InvalidCharacter => break :integer,
+ };
+ return self.failNode(scope, &ident.base, "TODO implement arbitrary integer bitwidth types", .{});
+ }
+ }
+
+ if (self.lookupDeclName(scope, ident_name)) |decl| {
+ const src = tree.token_locs[ident.token].start;
+ return try self.addZIRInst(scope, src, zir.Inst.DeclValInModule, .{ .decl = decl }, .{});
+ }
+
+ return self.failNode(scope, &ident.base, "TODO implement local variable identifier lookup", .{});
+}
+
+fn astGenStringLiteral(self: *Module, scope: *Scope, str_lit: *ast.Node.StringLiteral) InnerError!*zir.Inst {
+ const tree = scope.tree();
+ const unparsed_bytes = tree.tokenSlice(str_lit.token);
+ const arena = scope.arena();
+
+ var bad_index: usize = undefined;
+ const bytes = std.zig.parseStringLiteral(arena, unparsed_bytes, &bad_index) catch |err| switch (err) {
+ error.InvalidCharacter => {
+ const bad_byte = unparsed_bytes[bad_index];
+ const src = tree.token_locs[str_lit.token].start;
+ return self.fail(scope, src + bad_index, "invalid string literal character: '{c}'\n", .{bad_byte});
+ },
+ else => |e| return e,
};
+
+ const src = tree.token_locs[str_lit.token].start;
+ return self.addZIRInst(scope, src, zir.Inst.Str, .{ .bytes = bytes }, .{});
+}
+
+fn astGenIntegerLiteral(self: *Module, scope: *Scope, int_lit: *ast.Node.IntegerLiteral) InnerError!*zir.Inst {
+ const arena = scope.arena();
+ const tree = scope.tree();
+ const prefixed_bytes = tree.tokenSlice(int_lit.token);
+ const base = if (mem.startsWith(u8, prefixed_bytes, "0x"))
+ 16
+ else if (mem.startsWith(u8, prefixed_bytes, "0o"))
+ 8
+ else if (mem.startsWith(u8, prefixed_bytes, "0b"))
+ 2
+ else
+ @as(u8, 10);
+
+ const bytes = if (base == 10)
+ prefixed_bytes
+ else
+ prefixed_bytes[2..];
+
+ if (std.fmt.parseInt(u64, bytes, base)) |small_int| {
+ const int_payload = try arena.create(Value.Payload.Int_u64);
+ int_payload.* = .{ .int = small_int };
+ const src = tree.token_locs[int_lit.token].start;
+ return self.addZIRInstConst(scope, src, .{
+ .ty = Type.initTag(.comptime_int),
+ .val = Value.initPayload(&int_payload.base),
+ });
+ } else |err| {
+ return self.failTok(scope, int_lit.token, "TODO implement int literals that don't fit in a u64", .{});
+ }
+}
+
+fn astGenBlock(self: *Module, scope: *Scope, block_node: *ast.Node.Block) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ if (block_node.label) |label| {
+ return self.failTok(scope, label, "TODO implement labeled blocks", .{});
+ }
+ for (block_node.statements()) |statement| {
+ _ = try self.astGenExpr(scope, statement);
+ }
+}
+
+fn astGenAsm(self: *Module, scope: *Scope, asm_node: *ast.Node.Asm) InnerError!*zir.Inst {
+ if (asm_node.outputs.len != 0) {
+ return self.failNode(scope, &asm_node.base, "TODO implement asm with an output", .{});
+ }
+ const arena = scope.arena();
+ const tree = scope.tree();
+
+ const inputs = try arena.alloc(*zir.Inst, asm_node.inputs.len);
+ const args = try arena.alloc(*zir.Inst, asm_node.inputs.len);
+
+ for (asm_node.inputs) |input, i| {
+ // TODO semantically analyze constraints
+ inputs[i] = try self.astGenExpr(scope, input.constraint);
+ args[i] = try self.astGenExpr(scope, input.expr);
+ }
+
+ const src = tree.token_locs[asm_node.asm_token].start;
+ const return_type = try self.addZIRInstConst(scope, src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.void_type),
+ });
+ const asm_inst = try self.addZIRInst(scope, src, zir.Inst.Asm, .{
+ .asm_source = try self.astGenExpr(scope, asm_node.template),
+ .return_type = return_type,
+ }, .{
+ .@"volatile" = asm_node.volatile_token != null,
+ //.clobbers = TODO handle clobbers
+ .inputs = inputs,
+ .args = args,
+ });
+ return asm_inst;
+}
+
+fn astGenBuiltinCall(self: *Module, scope: *Scope, call: *ast.Node.BuiltinCall) InnerError!*zir.Inst {
+ const tree = scope.tree();
+ const builtin_name = tree.tokenSlice(call.builtin_token);
+ const src = tree.token_locs[call.builtin_token].start;
+
+ inline for (std.meta.declarations(zir.Inst)) |inst| {
+ if (inst.data != .Type) continue;
+ const T = inst.data.Type;
+ if (!@hasDecl(T, "builtin_name")) continue;
+ if (std.mem.eql(u8, builtin_name, T.builtin_name)) {
+ var value: T = undefined;
+ const positionals = @typeInfo(std.meta.fieldInfo(T, "positionals").field_type).Struct;
+ if (positionals.fields.len == 0) {
+ return self.addZIRInst(scope, src, T, value.positionals, value.kw_args);
+ }
+ const arg_count: ?usize = if (positionals.fields[0].field_type == []*zir.Inst) null else positionals.fields.len;
+ if (arg_count) |some| {
+ if (call.params_len != some) {
+ return self.failTok(scope, call.builtin_token, "expected {} parameter, found {}", .{ some, call.params_len });
+ }
+ const params = call.params();
+ inline for (positionals.fields) |p, i| {
+ @field(value.positionals, p.name) = try self.astGenExpr(scope, params[i]);
+ }
+ } else {
+ return self.failTok(scope, call.builtin_token, "TODO var args builtin '{}'", .{builtin_name});
+ }
+
+ return self.addZIRInst(scope, src, T, value.positionals, .{});
+ }
+ }
+ return self.failTok(scope, call.builtin_token, "TODO implement builtin call for '{}'", .{builtin_name});
+}
+
+fn astGenCall(self: *Module, scope: *Scope, call: *ast.Node.Call) InnerError!*zir.Inst {
+ const tree = scope.tree();
+
+ if (call.params_len != 0) {
+ return self.failNode(scope, &call.base, "TODO implement fn calls with parameters", .{});
+ }
+ const lhs = try self.astGenExpr(scope, call.lhs);
+
+ const src = tree.token_locs[call.lhs.firstToken()].start;
+ return self.addZIRInst(scope, src, zir.Inst.Call, .{
+ .func = lhs,
+ .args = &[0]*zir.Inst{},
+ }, .{});
+}
+
+fn astGenUnreachable(self: *Module, scope: *Scope, unreach_node: *ast.Node.Unreachable) InnerError!*zir.Inst {
+ const tree = scope.tree();
+ const src = tree.token_locs[unreach_node.token].start;
+ return self.addZIRInst(scope, src, zir.Inst.Unreachable, .{}, .{});
+}
+
+fn getSimplePrimitiveValue(name: []const u8) ?TypedValue {
+ const simple_types = std.ComptimeStringMap(Value.Tag, .{
+ .{ "u8", .u8_type },
+ .{ "i8", .i8_type },
+ .{ "isize", .isize_type },
+ .{ "usize", .usize_type },
+ .{ "c_short", .c_short_type },
+ .{ "c_ushort", .c_ushort_type },
+ .{ "c_int", .c_int_type },
+ .{ "c_uint", .c_uint_type },
+ .{ "c_long", .c_long_type },
+ .{ "c_ulong", .c_ulong_type },
+ .{ "c_longlong", .c_longlong_type },
+ .{ "c_ulonglong", .c_ulonglong_type },
+ .{ "c_longdouble", .c_longdouble_type },
+ .{ "f16", .f16_type },
+ .{ "f32", .f32_type },
+ .{ "f64", .f64_type },
+ .{ "f128", .f128_type },
+ .{ "c_void", .c_void_type },
+ .{ "bool", .bool_type },
+ .{ "void", .void_type },
+ .{ "type", .type_type },
+ .{ "anyerror", .anyerror_type },
+ .{ "comptime_int", .comptime_int_type },
+ .{ "comptime_float", .comptime_float_type },
+ .{ "noreturn", .noreturn_type },
+ });
+ if (simple_types.get(name)) |tag| {
+ return TypedValue{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(tag),
+ };
+ }
+ if (mem.eql(u8, name, "null")) {
+ return TypedValue{
+ .ty = Type.initTag(.@"null"),
+ .val = Value.initTag(.null_value),
+ };
+ }
+ if (mem.eql(u8, name, "undefined")) {
+ return TypedValue{
+ .ty = Type.initTag(.@"undefined"),
+ .val = Value.initTag(.undef),
+ };
+ }
+ if (mem.eql(u8, name, "true")) {
+ return TypedValue{
+ .ty = Type.initTag(.bool),
+ .val = Value.initTag(.bool_true),
+ };
+ }
+ if (mem.eql(u8, name, "false")) {
+ return TypedValue{
+ .ty = Type.initTag(.bool),
+ .val = Value.initTag(.bool_false),
+ };
+ }
+ return null;
}
fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void {
@@ -783,29 +1591,12 @@ fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void
}
}
-fn getSource(self: *Module, root_scope: *Scope.ZIRModule) ![:0]const u8 {
- switch (root_scope.source) {
- .unloaded => {
- const source = try self.root_pkg.root_src_dir.readFileAllocOptions(
- self.allocator,
- root_scope.sub_file_path,
- std.math.maxInt(u32),
- 1,
- 0,
- );
- root_scope.source = .{ .bytes = source };
- return source;
- },
- .bytes => |bytes| return bytes,
- }
-}
-
fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
switch (root_scope.status) {
.never_loaded, .unloaded_success => {
try self.failed_files.ensureCapacity(self.failed_files.size + 1);
- const source = try self.getSource(root_scope);
+ const source = try root_scope.getSource(self);
var keep_zir_module = false;
const zir_module = try self.allocator.create(zir.Module);
@@ -816,7 +1607,7 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
if (zir_module.error_msg) |src_err_msg| {
self.failed_files.putAssumeCapacityNoClobber(
- root_scope,
+ &root_scope.base,
try ErrorMsg.create(self.allocator, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
);
root_scope.status = .unloaded_parse_failure;
@@ -838,90 +1629,189 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
}
}
-fn analyzeRoot(self: *Module, root_scope: *Scope.ZIRModule) !void {
+fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree {
+ const tracy = trace(@src());
+ defer tracy.end();
+
switch (root_scope.status) {
- .never_loaded => {
- const src_module = try self.getSrcModule(root_scope);
+ .never_loaded, .unloaded_success => {
+ try self.failed_files.ensureCapacity(self.failed_files.size + 1);
- // Here we ensure enough queue capacity to store all the decls, so that later we can use
- // appendAssumeCapacity.
- try self.work_queue.ensureUnusedCapacity(src_module.decls.len);
+ const source = try root_scope.getSource(self);
- for (src_module.decls) |decl| {
- if (decl.cast(zir.Inst.Export)) |export_inst| {
- _ = try self.resolveDecl(&root_scope.base, &export_inst.base);
- }
+ var keep_tree = false;
+ const tree = try std.zig.parse(self.allocator, source);
+ defer if (!keep_tree) tree.deinit();
+
+ if (tree.errors.len != 0) {
+ const parse_err = tree.errors[0];
+
+ var msg = std.ArrayList(u8).init(self.allocator);
+ defer msg.deinit();
+
+ try parse_err.render(tree.token_ids, msg.outStream());
+ const err_msg = try self.allocator.create(ErrorMsg);
+ err_msg.* = .{
+ .msg = msg.toOwnedSlice(),
+ .byte_offset = tree.token_locs[parse_err.loc()].start,
+ };
+
+ self.failed_files.putAssumeCapacityNoClobber(&root_scope.base, err_msg);
+ root_scope.status = .unloaded_parse_failure;
+ return error.AnalysisFail;
}
+
+ root_scope.status = .loaded_success;
+ root_scope.contents = .{ .tree = tree };
+ keep_tree = true;
+
+ return tree;
},
- .unloaded_parse_failure,
- .unloaded_sema_failure,
- .unloaded_success,
- .loaded_sema_failure,
- .loaded_success,
- => {
- const src_module = try self.getSrcModule(root_scope);
-
- var exports_to_resolve = std.ArrayList(*zir.Inst).init(self.allocator);
- defer exports_to_resolve.deinit();
-
- // Keep track of the decls that we expect to see in this file so that
- // we know which ones have been deleted.
- var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator);
- defer deleted_decls.deinit();
- try deleted_decls.ensureCapacity(self.decl_table.size);
- {
- var it = self.decl_table.iterator();
- while (it.next()) |kv| {
- deleted_decls.putAssumeCapacityNoClobber(kv.value, {});
+ .unloaded_parse_failure => return error.AnalysisFail,
+
+ .loaded_success => return root_scope.contents.tree,
+ }
+}
+
+fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
+ // We may be analyzing it for the first time, or this may be
+ // an incremental update. This code handles both cases.
+ const tree = try self.getAstTree(root_scope);
+ const decls = tree.root_node.decls();
+
+ try self.work_queue.ensureUnusedCapacity(decls.len);
+ try root_scope.decls.ensureCapacity(self.allocator, decls.len);
+
+ // Keep track of the decls that we expect to see in this file so that
+ // we know which ones have been deleted.
+ var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator);
+ defer deleted_decls.deinit();
+ try deleted_decls.ensureCapacity(root_scope.decls.items.len);
+ for (root_scope.decls.items) |file_decl| {
+ deleted_decls.putAssumeCapacityNoClobber(file_decl, {});
+ }
+
+ for (decls) |src_decl, decl_i| {
+ if (src_decl.cast(ast.Node.FnProto)) |fn_proto| {
+ // We will create a Decl for it regardless of analysis status.
+ const name_tok = fn_proto.name_token orelse
+ @panic("TODO handle missing function name in the parser");
+ const name_loc = tree.token_locs[name_tok];
+ const name = tree.tokenSliceLoc(name_loc);
+ const name_hash = root_scope.fullyQualifiedNameHash(name);
+ const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
+ if (self.decl_table.get(name_hash)) |kv| {
+ const decl = kv.value;
+ // Update the AST Node index of the decl, even if its contents are unchanged, it may
+ // have been re-ordered.
+ decl.src_index = decl_i;
+ deleted_decls.removeAssertDiscard(decl);
+ if (!srcHashEql(decl.contents_hash, contents_hash)) {
+ try self.markOutdatedDecl(decl);
+ decl.contents_hash = contents_hash;
}
- }
-
- for (src_module.decls) |src_decl| {
- const name_hash = Decl.hashSimpleName(src_decl.name);
- if (self.decl_table.get(name_hash)) |kv| {
- const decl = kv.value;
- deleted_decls.removeAssertDiscard(decl);
- const new_contents_hash = Decl.hashSimpleName(src_decl.contents);
- //std.debug.warn("'{}' contents: '{}'\n", .{ src_decl.name, src_decl.contents });
- if (!mem.eql(u8, &new_contents_hash, &decl.contents_hash)) {
- //std.debug.warn("'{}' {x} => {x}\n", .{ src_decl.name, decl.contents_hash, new_contents_hash });
- try self.markOutdatedDecl(decl);
- decl.contents_hash = new_contents_hash;
+ } else {
+ const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
+ root_scope.decls.appendAssumeCapacity(new_decl);
+ if (fn_proto.extern_export_inline_token) |maybe_export_token| {
+ if (tree.token_ids[maybe_export_token] == .Keyword_export) {
+ self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
}
- } else if (src_decl.cast(zir.Inst.Export)) |export_inst| {
- try exports_to_resolve.append(&export_inst.base);
}
}
- {
- // Handle explicitly deleted decls from the source code. Not to be confused
- // with when we delete decls because they are no longer referenced.
- var it = deleted_decls.iterator();
- while (it.next()) |kv| {
- //std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name});
- try self.deleteDecl(kv.key);
- }
+ }
+ // TODO also look for global variable declarations
+ // TODO also look for comptime blocks and exported globals
+ }
+ {
+ // Handle explicitly deleted decls from the source code. Not to be confused
+ // with when we delete decls because they are no longer referenced.
+ var it = deleted_decls.iterator();
+ while (it.next()) |kv| {
+ //std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name});
+ try self.deleteDecl(kv.key);
+ }
+ }
+}
+
+fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
+ // We may be analyzing it for the first time, or this may be
+ // an incremental update. This code handles both cases.
+ const src_module = try self.getSrcModule(root_scope);
+
+ try self.work_queue.ensureUnusedCapacity(src_module.decls.len);
+ try root_scope.decls.ensureCapacity(self.allocator, src_module.decls.len);
+
+ var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.allocator);
+ defer exports_to_resolve.deinit();
+
+ // Keep track of the decls that we expect to see in this file so that
+ // we know which ones have been deleted.
+ var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator);
+ defer deleted_decls.deinit();
+ try deleted_decls.ensureCapacity(self.decl_table.size);
+ {
+ var it = self.decl_table.iterator();
+ while (it.next()) |kv| {
+ deleted_decls.putAssumeCapacityNoClobber(kv.value, {});
+ }
+ }
+
+ for (src_module.decls) |src_decl, decl_i| {
+ const name_hash = root_scope.fullyQualifiedNameHash(src_decl.name);
+ if (self.decl_table.get(name_hash)) |kv| {
+ const decl = kv.value;
+ deleted_decls.removeAssertDiscard(decl);
+ //std.debug.warn("'{}' contents: '{}'\n", .{ src_decl.name, src_decl.contents });
+ if (!srcHashEql(src_decl.contents_hash, decl.contents_hash)) {
+ try self.markOutdatedDecl(decl);
+ decl.contents_hash = src_decl.contents_hash;
}
- for (exports_to_resolve.items) |export_inst| {
- _ = try self.resolveDecl(&root_scope.base, export_inst);
+ } else {
+ const new_decl = try self.createNewDecl(
+ &root_scope.base,
+ src_decl.name,
+ decl_i,
+ name_hash,
+ src_decl.contents_hash,
+ );
+ root_scope.decls.appendAssumeCapacity(new_decl);
+ if (src_decl.inst.cast(zir.Inst.Export)) |export_inst| {
+ try exports_to_resolve.append(src_decl);
}
- },
+ }
+ }
+ for (exports_to_resolve.items) |export_decl| {
+ _ = try self.resolveZirDecl(&root_scope.base, export_decl);
+ }
+ {
+ // Handle explicitly deleted decls from the source code. Not to be confused
+ // with when we delete decls because they are no longer referenced.
+ var it = deleted_decls.iterator();
+ while (it.next()) |kv| {
+ //std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name});
+ try self.deleteDecl(kv.key);
+ }
}
}
fn deleteDecl(self: *Module, decl: *Decl) !void {
try self.deletion_set.ensureCapacity(self.allocator, self.deletion_set.items.len + decl.dependencies.items.len);
+ // Remove from the namespace it resides in. In the case of an anonymous Decl it will
+ // not be present in the set, and this does nothing.
+ decl.scope.removeDecl(decl);
+
//std.debug.warn("deleting decl '{}'\n", .{decl.name});
const name_hash = decl.fullyQualifiedNameHash();
self.decl_table.removeAssertDiscard(name_hash);
// Remove itself from its dependencies, because we are about to destroy the decl pointer.
for (decl.dependencies.items) |dep| {
dep.removeDependant(decl);
- if (dep.dependants.items.len == 0) {
+ if (dep.dependants.items.len == 0 and !dep.deletion_flag) {
// We don't recursively perform a deletion here, because during the update,
// another reference to it may turn up.
- assert(!dep.deletion_flag);
dep.deletion_flag = true;
self.deletion_set.appendAssumeCapacity(dep);
}
@@ -974,83 +1864,89 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
}
fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
// Use the Decl's arena for function memory.
var arena = decl.typed_value.most_recent.arena.?.promote(self.allocator);
defer decl.typed_value.most_recent.arena.?.* = arena.state;
- var analysis: Fn.Analysis = .{
- .inner_block = .{
- .func = func,
- .decl = decl,
- .instructions = .{},
- .arena = &arena.allocator,
- },
- .needed_inst_capacity = 0,
- .inst_table = std.AutoHashMap(*zir.Inst, *Inst).init(self.allocator),
+ var inner_block: Scope.Block = .{
+ .func = func,
+ .decl = decl,
+ .instructions = .{},
+ .arena = &arena.allocator,
};
- defer analysis.inner_block.instructions.deinit(self.allocator);
- defer analysis.inst_table.deinit();
+ defer inner_block.instructions.deinit(self.allocator);
- const fn_inst = func.analysis.queued;
- func.analysis = .{ .in_progress = &analysis };
+ const fn_zir = func.analysis.queued;
+ defer fn_zir.arena.promote(self.allocator).deinit();
+ func.analysis = .{ .in_progress = {} };
+ //std.debug.warn("set {} to in_progress\n", .{decl.name});
- try self.analyzeBody(&analysis.inner_block.base, fn_inst.positionals.body);
+ try self.analyzeBody(&inner_block.base, fn_zir.body);
- func.analysis = .{
- .success = .{
- .instructions = try arena.allocator.dupe(*Inst, analysis.inner_block.instructions.items),
- },
- };
+ const instructions = try arena.allocator.dupe(*Inst, inner_block.instructions.items);
+ func.analysis = .{ .success = .{ .instructions = instructions } };
+ //std.debug.warn("set {} to success\n", .{decl.name});
}
-fn reAnalyzeDecl(self: *Module, decl: *Decl, old_inst: *zir.Inst) InnerError!void {
- switch (decl.analysis) {
- .in_progress => unreachable,
- .dependency_failure,
- .sema_failure,
- .codegen_failure,
- .codegen_failure_retryable,
- .complete,
- => return,
-
- .outdated => {}, // Decl re-analysis
+fn markOutdatedDecl(self: *Module, decl: *Decl) !void {
+ //std.debug.warn("mark {} outdated\n", .{decl.name});
+ try self.work_queue.writeItem(.{ .analyze_decl = decl });
+ if (self.failed_decls.remove(decl)) |entry| {
+ entry.value.destroy(self.allocator);
}
- //std.debug.warn("re-analyzing {}\n", .{decl.name});
- decl.src = old_inst.src;
+ decl.analysis = .outdated;
+}
- // The exports this Decl performs will be re-discovered, so we remove them here
- // prior to re-analysis.
- self.deleteDeclExports(decl);
- // Dependencies will be re-discovered, so we remove them here prior to re-analysis.
- for (decl.dependencies.items) |dep| {
- dep.removeDependant(decl);
- if (dep.dependants.items.len == 0) {
- // We don't perform a deletion here, because this Decl or another one
- // may end up referencing it before the update is complete.
- assert(!dep.deletion_flag);
- dep.deletion_flag = true;
- try self.deletion_set.append(self.allocator, dep);
- }
- }
- decl.dependencies.shrink(self.allocator, 0);
+fn allocateNewDecl(
+ self: *Module,
+ scope: *Scope,
+ src_index: usize,
+ contents_hash: std.zig.SrcHash,
+) !*Decl {
+ const new_decl = try self.allocator.create(Decl);
+ new_decl.* = .{
+ .name = "",
+ .scope = scope.namespace(),
+ .src_index = src_index,
+ .typed_value = .{ .never_succeeded = {} },
+ .analysis = .unreferenced,
+ .deletion_flag = false,
+ .contents_hash = contents_hash,
+ .link = link.ElfFile.TextBlock.empty,
+ .generation = 0,
+ };
+ return new_decl;
+}
+
+fn createNewDecl(
+ self: *Module,
+ scope: *Scope,
+ decl_name: []const u8,
+ src_index: usize,
+ name_hash: Scope.NameHash,
+ contents_hash: std.zig.SrcHash,
+) !*Decl {
+ try self.decl_table.ensureCapacity(self.decl_table.size + 1);
+ const new_decl = try self.allocateNewDecl(scope, src_index, contents_hash);
+ errdefer self.allocator.destroy(new_decl);
+ new_decl.name = try mem.dupeZ(self.allocator, u8, decl_name);
+ self.decl_table.putAssumeCapacityNoClobber(name_hash, new_decl);
+ return new_decl;
+}
+
+fn analyzeZirDecl(self: *Module, decl: *Decl, src_decl: *zir.Decl) InnerError!bool {
var decl_scope: Scope.DeclAnalysis = .{
.decl = decl,
.arena = std.heap.ArenaAllocator.init(self.allocator),
};
errdefer decl_scope.arena.deinit();
- const typed_value = self.analyzeInstConst(&decl_scope.base, old_inst) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.AnalysisFail => {
- switch (decl.analysis) {
- .in_progress => decl.analysis = .dependency_failure,
- else => {},
- }
- decl.generation = self.generation;
- return error.AnalysisFail;
- },
- };
+ decl.analysis = .in_progress;
+
+ const typed_value = try self.analyzeConstInst(&decl_scope.base, src_decl.inst);
const arena_state = try decl_scope.arena.allocator.create(std.heap.ArenaAllocator.State);
- arena_state.* = decl_scope.arena.state;
var prev_type_has_bits = false;
var type_changed = true;
@@ -1061,6 +1957,8 @@ fn reAnalyzeDecl(self: *Module, decl: *Decl, old_inst: *zir.Inst) InnerError!voi
tvm.deinit(self.allocator);
}
+
+ arena_state.* = decl_scope.arena.state;
decl.typed_value = .{
.most_recent = .{
.typed_value = typed_value,
@@ -1079,137 +1977,66 @@ fn reAnalyzeDecl(self: *Module, decl: *Decl, old_inst: *zir.Inst) InnerError!voi
self.bin_file.freeDecl(decl);
}
- // If the decl is a function, and the type is the same, we do not need
- // to chase the dependants.
- if (type_changed or typed_value.val.tag() != .function) {
- for (decl.dependants.items) |dep| {
- switch (dep.analysis) {
- .in_progress => unreachable,
- .outdated => continue, // already queued for update
-
- .dependency_failure,
- .sema_failure,
- .codegen_failure,
- .codegen_failure_retryable,
- .complete,
- => if (dep.generation != self.generation) {
- try self.markOutdatedDecl(dep);
- },
- }
- }
- }
+ return type_changed;
}
-fn markOutdatedDecl(self: *Module, decl: *Decl) !void {
- //std.debug.warn("mark {} outdated\n", .{decl.name});
- try self.work_queue.writeItem(.{ .re_analyze_decl = decl });
- if (self.failed_decls.remove(decl)) |entry| {
- entry.value.destroy(self.allocator);
- }
- decl.analysis = .outdated;
+fn resolveZirDecl(self: *Module, scope: *Scope, src_decl: *zir.Decl) InnerError!*Decl {
+ const zir_module = self.root_scope.cast(Scope.ZIRModule).?;
+ const entry = zir_module.contents.module.findDecl(src_decl.name).?;
+ return self.resolveZirDeclHavingIndex(scope, src_decl, entry.index);
}
-fn resolveDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Decl {
- const hash = Decl.hashSimpleName(old_inst.name);
- if (self.decl_table.get(hash)) |kv| {
- const decl = kv.value;
- try self.reAnalyzeDecl(decl, old_inst);
- return decl;
- } else if (old_inst.cast(zir.Inst.DeclVal)) |decl_val| {
- // This is just a named reference to another decl.
- return self.analyzeDeclVal(scope, decl_val);
- } else {
- const new_decl = blk: {
- try self.decl_table.ensureCapacity(self.decl_table.size + 1);
- const new_decl = try self.allocator.create(Decl);
- errdefer self.allocator.destroy(new_decl);
- const name = try mem.dupeZ(self.allocator, u8, old_inst.name);
- errdefer self.allocator.free(name);
- new_decl.* = .{
- .name = name,
- .scope = scope.namespace(),
- .src = old_inst.src,
- .typed_value = .{ .never_succeeded = {} },
- .analysis = .in_progress,
- .deletion_flag = false,
- .contents_hash = Decl.hashSimpleName(old_inst.contents),
- .link = link.ElfFile.TextBlock.empty,
- .generation = 0,
- };
- self.decl_table.putAssumeCapacityNoClobber(hash, new_decl);
- break :blk new_decl;
- };
-
- var decl_scope: Scope.DeclAnalysis = .{
- .decl = new_decl,
- .arena = std.heap.ArenaAllocator.init(self.allocator),
- };
- errdefer decl_scope.arena.deinit();
-
- const typed_value = self.analyzeInstConst(&decl_scope.base, old_inst) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.AnalysisFail => {
- switch (new_decl.analysis) {
- .in_progress => new_decl.analysis = .dependency_failure,
- else => {},
- }
- new_decl.generation = self.generation;
- return error.AnalysisFail;
- },
- };
- const arena_state = try decl_scope.arena.allocator.create(std.heap.ArenaAllocator.State);
-
- arena_state.* = decl_scope.arena.state;
-
- new_decl.typed_value = .{
- .most_recent = .{
- .typed_value = typed_value,
- .arena = arena_state,
- },
- };
- new_decl.analysis = .complete;
- new_decl.generation = self.generation;
- if (typed_value.ty.hasCodeGenBits()) {
- // We don't fully codegen the decl until later, but we do need to reserve a global
- // offset table index for it. This allows us to codegen decls out of dependency order,
- // increasing how many computations can be done in parallel.
- try self.bin_file.allocateDeclIndexes(new_decl);
- try self.work_queue.writeItem(.{ .codegen_decl = new_decl });
- }
- return new_decl;
- }
+fn resolveZirDeclHavingIndex(self: *Module, scope: *Scope, src_decl: *zir.Decl, src_index: usize) InnerError!*Decl {
+ const name_hash = scope.namespace().fullyQualifiedNameHash(src_decl.name);
+ const decl = self.decl_table.getValue(name_hash).?;
+ decl.src_index = src_index;
+ try self.ensureDeclAnalyzed(decl);
+ return decl;
}
/// Declares a dependency on the decl.
-fn resolveCompleteDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Decl {
- const decl = try self.resolveDecl(scope, old_inst);
+fn resolveCompleteZirDecl(self: *Module, scope: *Scope, src_decl: *zir.Decl) InnerError!*Decl {
+ const decl = try self.resolveZirDecl(scope, src_decl);
switch (decl.analysis) {
+ .unreferenced => unreachable,
.in_progress => unreachable,
.outdated => unreachable,
.dependency_failure,
.sema_failure,
+ .sema_failure_retryable,
.codegen_failure,
.codegen_failure_retryable,
=> return error.AnalysisFail,
.complete => {},
}
- if (scope.decl()) |scope_decl| {
- try self.declareDeclDependency(scope_decl, decl);
- }
return decl;
}
+/// TODO Look into removing this function. The body is only needed for .zir files, not .zig files.
fn resolveInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Inst {
- if (scope.cast(Scope.Block)) |block| {
- if (block.func.analysis.in_progress.inst_table.get(old_inst)) |kv| {
- return kv.value;
- }
- }
-
- const decl = try self.resolveCompleteDecl(scope, old_inst);
+ if (old_inst.analyzed_inst) |inst| return inst;
+
+ // If this assert trips, the instruction that was referenced did not get properly
+ // analyzed before it was referenced.
+ const zir_module = scope.namespace().cast(Scope.ZIRModule).?;
+ const entry = if (old_inst.cast(zir.Inst.DeclVal)) |declval| blk: {
+ const decl_name = declval.positionals.name;
+ const entry = zir_module.contents.module.findDecl(decl_name) orelse
+ return self.fail(scope, old_inst.src, "decl '{}' not found", .{decl_name});
+ break :blk entry;
+ } else blk: {
+ // If this assert trips, the instruction that was referenced did not get
+ // properly analyzed by a previous instruction analysis before it was
+ // referenced by the current one.
+ break :blk zir_module.contents.module.findInstDecl(old_inst).?;
+ };
+ const decl = try self.resolveCompleteZirDecl(scope, entry.decl);
const decl_ref = try self.analyzeDeclRef(scope, old_inst.src, decl);
+ // Note: it would be tempting here to store the result into old_inst.analyzed_inst field,
+ // but this would prevent the analyzeDeclRef from happening, which is needed to properly
+ // detect Decl dependencies and dependency failures on updates.
return self.analyzeDeref(scope, old_inst.src, decl_ref, old_inst.src);
}
@@ -1258,21 +2085,16 @@ fn resolveType(self: *Module, scope: *Scope, old_inst: *zir.Inst) !Type {
return val.toType();
}
-fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) InnerError!void {
- try self.decl_exports.ensureCapacity(self.decl_exports.size + 1);
- try self.export_owners.ensureCapacity(self.export_owners.size + 1);
- const symbol_name = try self.resolveConstString(scope, export_inst.positionals.symbol_name);
- const exported_decl = try self.resolveCompleteDecl(scope, export_inst.positionals.value);
+fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const u8, exported_decl: *Decl) !void {
+ try self.ensureDeclAnalyzed(exported_decl);
const typed_value = exported_decl.typed_value.most_recent.typed_value;
switch (typed_value.ty.zigTypeTag()) {
.Fn => {},
- else => return self.fail(
- scope,
- export_inst.positionals.value.src,
- "unable to export type '{}'",
- .{typed_value.ty},
- ),
+ else => return self.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}),
}
+ try self.decl_exports.ensureCapacity(self.decl_exports.size + 1);
+ try self.export_owners.ensureCapacity(self.export_owners.size + 1);
+
const new_export = try self.allocator.create(Export);
errdefer self.allocator.destroy(new_export);
@@ -1280,7 +2102,7 @@ fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) In
new_export.* = .{
.options = .{ .name = symbol_name },
- .src = export_inst.base.src,
+ .src = src,
.link = .{},
.owner_decl = owner_decl,
.exported_decl = exported_decl,
@@ -1311,7 +2133,7 @@ fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) In
try self.failed_exports.ensureCapacity(self.failed_exports.size + 1);
self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
self.allocator,
- export_inst.base.src,
+ src,
"unable to export: {}",
.{@errorName(err)},
));
@@ -1320,7 +2142,6 @@ fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) In
};
}
-/// TODO should not need the cast on the last parameter at the callsites
fn addNewInstArgs(
self: *Module,
block: *Scope.Block,
@@ -1334,6 +2155,46 @@ fn addNewInstArgs(
return &inst.base;
}
+fn newZIRInst(
+ allocator: *Allocator,
+ src: usize,
+ comptime T: type,
+ positionals: std.meta.fieldInfo(T, "positionals").field_type,
+ kw_args: std.meta.fieldInfo(T, "kw_args").field_type,
+) !*zir.Inst {
+ const inst = try allocator.create(T);
+ inst.* = .{
+ .base = .{
+ .tag = T.base_tag,
+ .src = src,
+ },
+ .positionals = positionals,
+ .kw_args = kw_args,
+ };
+ return &inst.base;
+}
+
+fn addZIRInst(
+ self: *Module,
+ scope: *Scope,
+ src: usize,
+ comptime T: type,
+ positionals: std.meta.fieldInfo(T, "positionals").field_type,
+ kw_args: std.meta.fieldInfo(T, "kw_args").field_type,
+) !*zir.Inst {
+ const gen_zir = scope.cast(Scope.GenZIR).?;
+ try gen_zir.instructions.ensureCapacity(gen_zir.instructions.items.len + 1);
+ const inst = try newZIRInst(&gen_zir.arena.allocator, src, T, positionals, kw_args);
+ gen_zir.instructions.appendAssumeCapacity(inst);
+ return inst;
+}
+
+/// TODO The existence of this function is a workaround for a bug in stage1.
+fn addZIRInstConst(self: *Module, scope: *Scope, src: usize, typed_value: TypedValue) !*zir.Inst {
+ const P = std.meta.fieldInfo(zir.Inst.Const, "positionals").field_type;
+ return self.addZIRInst(scope, src, zir.Inst.Const, P{ .typed_value = typed_value }, .{});
+}
+
fn addNewInst(self: *Module, block: *Scope.Block, src: usize, ty: Type, comptime T: type) !*T {
const inst = try block.arena.create(T);
inst.* = .{
@@ -1361,19 +2222,6 @@ fn constInst(self: *Module, scope: *Scope, src: usize, typed_value: TypedValue)
return &const_inst.base;
}
-fn constStr(self: *Module, scope: *Scope, src: usize, str: []const u8) !*Inst {
- const ty_payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0);
- ty_payload.* = .{ .len = str.len };
-
- const bytes_payload = try scope.arena().create(Value.Payload.Bytes);
- bytes_payload.* = .{ .data = str };
-
- return self.constInst(scope, src, .{
- .ty = Type.initPayload(&ty_payload.base),
- .val = Value.initPayload(&bytes_payload.base),
- });
-}
-
fn constType(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst {
return self.constInst(scope, src, .{
.ty = Type.initTag(.type),
@@ -1388,6 +2236,13 @@ fn constVoid(self: *Module, scope: *Scope, src: usize) !*Inst {
});
}
+fn constNoReturn(self: *Module, scope: *Scope, src: usize) !*Inst {
+ return self.constInst(scope, src, .{
+ .ty = Type.initTag(.noreturn),
+ .val = Value.initTag(.the_one_possible_value),
+ });
+}
+
fn constUndef(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst {
return self.constInst(scope, src, .{
.ty = ty,
@@ -1451,7 +2306,7 @@ fn constIntBig(self: *Module, scope: *Scope, src: usize, ty: Type, big_int: BigI
});
}
-fn analyzeInstConst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!TypedValue {
+fn analyzeConstInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!TypedValue {
const new_inst = try self.analyzeInst(scope, old_inst);
return TypedValue{
.ty = new_inst.ty,
@@ -1459,20 +2314,24 @@ fn analyzeInstConst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerErro
};
}
+fn analyzeInstConst(self: *Module, scope: *Scope, const_inst: *zir.Inst.Const) InnerError!*Inst {
+ // Move the TypedValue from old memory to new memory. This allows freeing the ZIR instructions
+ // after analysis.
+ const typed_value_copy = try const_inst.positionals.typed_value.copy(scope.arena());
+ return self.constInst(scope, const_inst.base.src, typed_value_copy);
+}
+
fn analyzeInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Inst {
switch (old_inst.tag) {
.breakpoint => return self.analyzeInstBreakpoint(scope, old_inst.cast(zir.Inst.Breakpoint).?),
.call => return self.analyzeInstCall(scope, old_inst.cast(zir.Inst.Call).?),
.compileerror => return self.analyzeInstCompileError(scope, old_inst.cast(zir.Inst.CompileError).?),
+ .@"const" => return self.analyzeInstConst(scope, old_inst.cast(zir.Inst.Const).?),
.declref => return self.analyzeInstDeclRef(scope, old_inst.cast(zir.Inst.DeclRef).?),
+ .declref_str => return self.analyzeInstDeclRefStr(scope, old_inst.cast(zir.Inst.DeclRefStr).?),
.declval => return self.analyzeInstDeclVal(scope, old_inst.cast(zir.Inst.DeclVal).?),
- .str => {
- const bytes = old_inst.cast(zir.Inst.Str).?.positionals.bytes;
- // The bytes references memory inside the ZIR module, which can get deallocated
- // after semantic analysis is complete. We need the memory to be in the Decl's arena.
- const arena_bytes = try scope.arena().dupe(u8, bytes);
- return self.constStr(scope, old_inst.src, arena_bytes);
- },
+ .declval_in_module => return self.analyzeInstDeclValInModule(scope, old_inst.cast(zir.Inst.DeclValInModule).?),
+ .str => return self.analyzeInstStr(scope, old_inst.cast(zir.Inst.Str).?),
.int => {
const big_int = old_inst.cast(zir.Inst.Int).?.positionals.int;
return self.constIntBig(scope, old_inst.src, Type.initTag(.comptime_int), big_int);
@@ -1484,13 +2343,10 @@ fn analyzeInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*In
.@"asm" => return self.analyzeInstAsm(scope, old_inst.cast(zir.Inst.Asm).?),
.@"unreachable" => return self.analyzeInstUnreachable(scope, old_inst.cast(zir.Inst.Unreachable).?),
.@"return" => return self.analyzeInstRet(scope, old_inst.cast(zir.Inst.Return).?),
+ .returnvoid => return self.analyzeInstRetVoid(scope, old_inst.cast(zir.Inst.ReturnVoid).?),
.@"fn" => return self.analyzeInstFn(scope, old_inst.cast(zir.Inst.Fn).?),
- .@"export" => {
- try self.analyzeExport(scope, old_inst.cast(zir.Inst.Export).?);
- return self.constVoid(scope, old_inst.src);
- },
+ .@"export" => return self.analyzeInstExport(scope, old_inst.cast(zir.Inst.Export).?),
.primitive => return self.analyzeInstPrimitive(scope, old_inst.cast(zir.Inst.Primitive).?),
- .ref => return self.analyzeInstRef(scope, old_inst.cast(zir.Inst.Ref).?),
.fntype => return self.analyzeInstFnType(scope, old_inst.cast(zir.Inst.FnType).?),
.intcast => return self.analyzeInstIntCast(scope, old_inst.cast(zir.Inst.IntCast).?),
.bitcast => return self.analyzeInstBitCast(scope, old_inst.cast(zir.Inst.BitCast).?),
@@ -1503,39 +2359,104 @@ fn analyzeInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*In
}
}
+fn analyzeInstStr(self: *Module, scope: *Scope, str_inst: *zir.Inst.Str) InnerError!*Inst {
+ // The bytes references memory inside the ZIR module, which can get deallocated
+ // after semantic analysis is complete. We need the memory to be in the new anonymous Decl's arena.
+ var new_decl_arena = std.heap.ArenaAllocator.init(self.allocator);
+ const arena_bytes = try new_decl_arena.allocator.dupe(u8, str_inst.positionals.bytes);
+
+ const ty_payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0);
+ ty_payload.* = .{ .len = arena_bytes.len };
+
+ const bytes_payload = try scope.arena().create(Value.Payload.Bytes);
+ bytes_payload.* = .{ .data = arena_bytes };
+
+ const new_decl = try self.createAnonymousDecl(scope, &new_decl_arena, .{
+ .ty = Type.initPayload(&ty_payload.base),
+ .val = Value.initPayload(&bytes_payload.base),
+ });
+ return self.analyzeDeclRef(scope, str_inst.base.src, new_decl);
+}
+
+fn createAnonymousDecl(
+ self: *Module,
+ scope: *Scope,
+ decl_arena: *std.heap.ArenaAllocator,
+ typed_value: TypedValue,
+) !*Decl {
+ const name_index = self.getNextAnonNameIndex();
+ const scope_decl = scope.decl().?;
+ const name = try std.fmt.allocPrint(self.allocator, "{}${}", .{ scope_decl.name, name_index });
+ defer self.allocator.free(name);
+ const name_hash = scope.namespace().fullyQualifiedNameHash(name);
+ const src_hash: std.zig.SrcHash = undefined;
+ const new_decl = try self.createNewDecl(scope, name, scope_decl.src_index, name_hash, src_hash);
+ const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
+
+ decl_arena_state.* = decl_arena.state;
+ new_decl.typed_value = .{
+ .most_recent = .{
+ .typed_value = typed_value,
+ .arena = decl_arena_state,
+ },
+ };
+ new_decl.analysis = .complete;
+ new_decl.generation = self.generation;
+
+ // TODO: This generates the Decl into the machine code file if it is of a type that is non-zero size.
+ // We should be able to further improve the compiler to omit Decls which are only referenced at
+ // compile-time and not runtime.
+ if (typed_value.ty.hasCodeGenBits()) {
+ try self.bin_file.allocateDeclIndexes(new_decl);
+ try self.work_queue.writeItem(.{ .codegen_decl = new_decl });
+ }
+
+ return new_decl;
+}
+
+fn getNextAnonNameIndex(self: *Module) usize {
+ return @atomicRmw(usize, &self.next_anon_name_index, .Add, 1, .Monotonic);
+}
+
+fn lookupDeclName(self: *Module, scope: *Scope, ident_name: []const u8) ?*Decl {
+ const namespace = scope.namespace();
+ const name_hash = namespace.fullyQualifiedNameHash(ident_name);
+ return self.decl_table.getValue(name_hash);
+}
+
+fn analyzeInstExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) InnerError!*Inst {
+ const symbol_name = try self.resolveConstString(scope, export_inst.positionals.symbol_name);
+ const exported_decl = self.lookupDeclName(scope, export_inst.positionals.decl_name) orelse
+ return self.fail(scope, export_inst.base.src, "decl '{}' not found", .{export_inst.positionals.decl_name});
+ try self.analyzeExport(scope, export_inst.base.src, symbol_name, exported_decl);
+ return self.constVoid(scope, export_inst.base.src);
+}
+
fn analyzeInstCompileError(self: *Module, scope: *Scope, inst: *zir.Inst.CompileError) InnerError!*Inst {
return self.fail(scope, inst.base.src, "{}", .{inst.positionals.msg});
}
fn analyzeInstBreakpoint(self: *Module, scope: *Scope, inst: *zir.Inst.Breakpoint) InnerError!*Inst {
const b = try self.requireRuntimeBlock(scope, inst.base.src);
- return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Breakpoint, Inst.Args(Inst.Breakpoint){});
+ return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Breakpoint, {});
}
-fn analyzeInstRef(self: *Module, scope: *Scope, inst: *zir.Inst.Ref) InnerError!*Inst {
- const decl = try self.resolveCompleteDecl(scope, inst.positionals.operand);
- return self.analyzeDeclRef(scope, inst.base.src, decl);
+fn analyzeInstDeclRefStr(self: *Module, scope: *Scope, inst: *zir.Inst.DeclRefStr) InnerError!*Inst {
+ const decl_name = try self.resolveConstString(scope, inst.positionals.name);
+ return self.analyzeDeclRefByName(scope, inst.base.src, decl_name);
}
fn analyzeInstDeclRef(self: *Module, scope: *Scope, inst: *zir.Inst.DeclRef) InnerError!*Inst {
- const decl_name = try self.resolveConstString(scope, inst.positionals.name);
- // This will need to get more fleshed out when there are proper structs & namespaces.
- const zir_module = scope.namespace();
- const src_decl = zir_module.contents.module.findDecl(decl_name) orelse
- return self.fail(scope, inst.positionals.name.src, "use of undeclared identifier '{}'", .{decl_name});
-
- const decl = try self.resolveCompleteDecl(scope, src_decl);
- return self.analyzeDeclRef(scope, inst.base.src, decl);
+ return self.analyzeDeclRefByName(scope, inst.base.src, inst.positionals.name);
}
fn analyzeDeclVal(self: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) InnerError!*Decl {
const decl_name = inst.positionals.name;
- // This will need to get more fleshed out when there are proper structs & namespaces.
- const zir_module = scope.namespace();
+ const zir_module = scope.namespace().cast(Scope.ZIRModule).?;
const src_decl = zir_module.contents.module.findDecl(decl_name) orelse
return self.fail(scope, inst.base.src, "use of undeclared identifier '{}'", .{decl_name});
- const decl = try self.resolveCompleteDecl(scope, src_decl);
+ const decl = try self.resolveCompleteZirDecl(scope, src_decl.decl);
return decl;
}
@@ -1546,18 +2467,46 @@ fn analyzeInstDeclVal(self: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) Inn
return self.analyzeDeref(scope, inst.base.src, ptr, inst.base.src);
}
+fn analyzeInstDeclValInModule(self: *Module, scope: *Scope, inst: *zir.Inst.DeclValInModule) InnerError!*Inst {
+ const decl = inst.positionals.decl;
+ const ptr = try self.analyzeDeclRef(scope, inst.base.src, decl);
+ return self.analyzeDeref(scope, inst.base.src, ptr, inst.base.src);
+}
+
fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) InnerError!*Inst {
+ const scope_decl = scope.decl().?;
+ try self.declareDeclDependency(scope_decl, decl);
+ self.ensureDeclAnalyzed(decl) catch |err| {
+ if (scope.cast(Scope.Block)) |block| {
+ if (block.func) |func| {
+ func.analysis = .dependency_failure;
+ } else {
+ block.decl.analysis = .dependency_failure;
+ }
+ } else {
+ scope_decl.analysis = .dependency_failure;
+ }
+ return err;
+ };
+
const decl_tv = try decl.typedValue();
const ty_payload = try scope.arena().create(Type.Payload.SingleConstPointer);
ty_payload.* = .{ .pointee_type = decl_tv.ty };
const val_payload = try scope.arena().create(Value.Payload.DeclRef);
val_payload.* = .{ .decl = decl };
+
return self.constInst(scope, src, .{
.ty = Type.initPayload(&ty_payload.base),
.val = Value.initPayload(&val_payload.base),
});
}
+fn analyzeDeclRefByName(self: *Module, scope: *Scope, src: usize, decl_name: []const u8) InnerError!*Inst {
+ const decl = self.lookupDeclName(scope, decl_name) orelse
+ return self.fail(scope, src, "decl '{}' not found", .{decl_name});
+ return self.analyzeDeclRef(scope, src, decl);
+}
+
fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst {
const func = try self.resolveInst(scope, inst.positionals.func);
if (func.ty.zigTypeTag() != .Fn)
@@ -1616,7 +2565,7 @@ fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerErro
}
const b = try self.requireRuntimeBlock(scope, inst.base.src);
- return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Call, Inst.Args(Inst.Call){
+ return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Call, .{
.func = func,
.args = casted_args,
});
@@ -1624,10 +2573,22 @@ fn analyzeInstCall(self: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerErro
fn analyzeInstFn(self: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError!*Inst {
const fn_type = try self.resolveType(scope, fn_inst.positionals.fn_type);
+ const fn_zir = blk: {
+ var fn_arena = std.heap.ArenaAllocator.init(self.allocator);
+ errdefer fn_arena.deinit();
+
+ const fn_zir = try scope.arena().create(Fn.ZIR);
+ fn_zir.* = .{
+ .body = .{
+ .instructions = fn_inst.positionals.body.instructions,
+ },
+ .arena = fn_arena.state,
+ };
+ break :blk fn_zir;
+ };
const new_func = try scope.arena().create(Fn);
new_func.* = .{
- .fn_type = fn_type,
- .analysis = .{ .queued = fn_inst },
+ .analysis = .{ .queued = fn_zir },
.owner_decl = scope.decl().?,
};
const fn_payload = try scope.arena().create(Value.Payload.Function);
@@ -1648,6 +2609,13 @@ fn analyzeInstFnType(self: *Module, scope: *Scope, fntype: *zir.Inst.FnType) Inn
return self.constType(scope, fntype.base.src, Type.initTag(.fn_noreturn_no_args));
}
+ if (return_type.zigTypeTag() == .Void and
+ fntype.positionals.param_types.len == 0 and
+ fntype.kw_args.cc == .Unspecified)
+ {
+ return self.constType(scope, fntype.base.src, Type.initTag(.fn_void_no_args));
+ }
+
if (return_type.zigTypeTag() == .NoReturn and
fntype.positionals.param_types.len == 0 and
fntype.kw_args.cc == .Naked)
@@ -1683,7 +2651,7 @@ fn analyzeInstPtrToInt(self: *Module, scope: *Scope, ptrtoint: *zir.Inst.PtrToIn
// TODO handle known-pointer-address
const b = try self.requireRuntimeBlock(scope, ptrtoint.base.src);
const ty = Type.initTag(.usize);
- return self.addNewInstArgs(b, ptrtoint.base.src, ty, Inst.PtrToInt, Inst.Args(Inst.PtrToInt){ .ptr = ptr });
+ return self.addNewInstArgs(b, ptrtoint.base.src, ty, Inst.PtrToInt, .{ .ptr = ptr });
}
fn analyzeInstFieldPtr(self: *Module, scope: *Scope, fieldptr: *zir.Inst.FieldPtr) InnerError!*Inst {
@@ -1875,7 +2843,7 @@ fn analyzeInstAsm(self: *Module, scope: *Scope, assembly: *zir.Inst.Asm) InnerEr
}
const b = try self.requireRuntimeBlock(scope, assembly.base.src);
- return self.addNewInstArgs(b, assembly.base.src, return_type, Inst.Assembly, Inst.Args(Inst.Assembly){
+ return self.addNewInstArgs(b, assembly.base.src, return_type, Inst.Assembly, .{
.asm_source = asm_source,
.is_volatile = assembly.kw_args.@"volatile",
.output = output,
@@ -1911,20 +2879,12 @@ fn analyzeInstCmp(self: *Module, scope: *Scope, inst: *zir.Inst.Cmp) InnerError!
}
const b = try self.requireRuntimeBlock(scope, inst.base.src);
switch (op) {
- .eq => return self.addNewInstArgs(
- b,
- inst.base.src,
- Type.initTag(.bool),
- Inst.IsNull,
- Inst.Args(Inst.IsNull){ .operand = opt_operand },
- ),
- .neq => return self.addNewInstArgs(
- b,
- inst.base.src,
- Type.initTag(.bool),
- Inst.IsNonNull,
- Inst.Args(Inst.IsNonNull){ .operand = opt_operand },
- ),
+ .eq => return self.addNewInstArgs(b, inst.base.src, Type.initTag(.bool), Inst.IsNull, .{
+ .operand = opt_operand,
+ }),
+ .neq => return self.addNewInstArgs(b, inst.base.src, Type.initTag(.bool), Inst.IsNonNull, .{
+ .operand = opt_operand,
+ }),
else => unreachable,
}
} else if (is_equality_cmp and
@@ -2019,23 +2979,19 @@ fn analyzeInstUnreachable(self: *Module, scope: *Scope, unreach: *zir.Inst.Unrea
}
fn analyzeInstRet(self: *Module, scope: *Scope, inst: *zir.Inst.Return) InnerError!*Inst {
+ const operand = try self.resolveInst(scope, inst.positionals.operand);
const b = try self.requireRuntimeBlock(scope, inst.base.src);
- return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.Ret, {});
+ return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.Ret, .{ .operand = operand });
+}
+
+fn analyzeInstRetVoid(self: *Module, scope: *Scope, inst: *zir.Inst.ReturnVoid) InnerError!*Inst {
+ const b = try self.requireRuntimeBlock(scope, inst.base.src);
+ return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.RetVoid, {});
}
fn analyzeBody(self: *Module, scope: *Scope, body: zir.Module.Body) !void {
- if (scope.cast(Scope.Block)) |b| {
- const analysis = b.func.analysis.in_progress;
- analysis.needed_inst_capacity += body.instructions.len;
- try analysis.inst_table.ensureCapacity(analysis.needed_inst_capacity);
- for (body.instructions) |src_inst| {
- const new_inst = try self.analyzeInst(scope, src_inst);
- analysis.inst_table.putAssumeCapacityNoClobber(src_inst, new_inst);
- }
- } else {
- for (body.instructions) |src_inst| {
- _ = try self.analyzeInst(scope, src_inst);
- }
+ for (body.instructions) |src_inst| {
+ src_inst.analyzed_inst = try self.analyzeInst(scope, src_inst);
}
}
@@ -2118,7 +3074,7 @@ fn cmpNumeric(
};
const casted_lhs = try self.coerce(scope, dest_type, lhs);
const casted_rhs = try self.coerce(scope, dest_type, rhs);
- return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, Inst.Args(Inst.Cmp){
+ return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, .{
.lhs = casted_lhs,
.rhs = casted_rhs,
.op = op,
@@ -2222,7 +3178,7 @@ fn cmpNumeric(
const casted_lhs = try self.coerce(scope, dest_type, lhs);
const casted_rhs = try self.coerce(scope, dest_type, lhs);
- return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, Inst.Args(Inst.Cmp){
+ return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, .{
.lhs = casted_lhs,
.rhs = casted_rhs,
.op = op,
@@ -2299,7 +3255,7 @@ fn bitcast(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
}
// TODO validate the type size and other compile errors
const b = try self.requireRuntimeBlock(scope, inst.src);
- return self.addNewInstArgs(b, inst.src, dest_type, Inst.BitCast, Inst.Args(Inst.BitCast){ .operand = inst });
+ return self.addNewInstArgs(b, inst.src, dest_type, Inst.BitCast, .{ .operand = inst });
}
fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
@@ -2316,6 +3272,30 @@ fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, a
return self.failWithOwnedErrorMsg(scope, src, err_msg);
}
+fn failTok(
+ self: *Module,
+ scope: *Scope,
+ token_index: ast.TokenIndex,
+ comptime format: []const u8,
+ args: var,
+) InnerError {
+ @setCold(true);
+ const src = scope.tree().token_locs[token_index].start;
+ return self.fail(scope, src, format, args);
+}
+
+fn failNode(
+ self: *Module,
+ scope: *Scope,
+ ast_node: *ast.Node,
+ comptime format: []const u8,
+ args: var,
+) InnerError {
+ @setCold(true);
+ const src = scope.tree().token_locs[ast_node.firstToken()].start;
+ return self.fail(scope, src, format, args);
+}
+
fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *ErrorMsg) InnerError {
{
errdefer err_msg.destroy(self.allocator);
@@ -2326,18 +3306,31 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Err
.decl => {
const decl = scope.cast(Scope.DeclAnalysis).?.decl;
decl.analysis = .sema_failure;
+ decl.generation = self.generation;
self.failed_decls.putAssumeCapacityNoClobber(decl, err_msg);
},
.block => {
const block = scope.cast(Scope.Block).?;
- block.func.analysis = .sema_failure;
+ if (block.func) |func| {
+ func.analysis = .sema_failure;
+ } else {
+ block.decl.analysis = .sema_failure;
+ block.decl.generation = self.generation;
+ }
self.failed_decls.putAssumeCapacityNoClobber(block.decl, err_msg);
},
+ .gen_zir => {
+ const gen_zir = scope.cast(Scope.GenZIR).?;
+ gen_zir.decl.analysis = .sema_failure;
+ gen_zir.decl.generation = self.generation;
+ self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
+ },
.zir_module => {
const zir_module = scope.cast(Scope.ZIRModule).?;
zir_module.status = .loaded_sema_failure;
- self.failed_files.putAssumeCapacityNoClobber(zir_module, err_msg);
+ self.failed_files.putAssumeCapacityNoClobber(scope, err_msg);
},
+ .file => unreachable,
}
return error.AnalysisFail;
}
@@ -2385,3 +3378,7 @@ pub const ErrorMsg = struct {
self.* = undefined;
}
};
+
+fn srcHashEql(a: std.zig.SrcHash, b: std.zig.SrcHash) bool {
+ return @bitCast(u128, a) == @bitCast(u128, b);
+}
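
An aside on the srcHashEql helper just above: std.zig.SrcHash is a small fixed-size byte array, so the @bitCast to u128 is simply a cheap way of comparing every byte at once, and equality does not depend on endianness. A minimal sketch of the equivalent byte-wise form, assuming the 16-byte definition of SrcHash (illustrative only, not part of this patch):

    const std = @import("std");

    // Byte-wise equivalent of the u128 bit-cast comparison above.
    fn srcHashEqlBytes(a: std.zig.SrcHash, b: std.zig.SrcHash) bool {
        return std.mem.eql(u8, &a, &b);
    }

    test "both comparison forms agree on equal hashes" {
        const zeros = [_]u8{0} ** 16;
        std.testing.expect(srcHashEqlBytes(zeros, zeros));
    }
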
diff --git a/src-self-hosted/TypedValue.zig b/src-self-hosted/TypedValue.zig
index 83a8f3c09f..48b2c04970 100644
--- a/src-self-hosted/TypedValue.zig
+++ b/src-self-hosted/TypedValue.zig
@@ -21,3 +21,11 @@ pub const Managed = struct {
self.* = undefined;
}
};
+
+/// Assumes arena allocation. Does a recursive copy.
+pub fn copy(self: TypedValue, allocator: *Allocator) error{OutOfMemory}!TypedValue {
+ return TypedValue{
+ .ty = try self.ty.copy(allocator),
+ .val = try self.val.copy(allocator),
+ };
+}
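
The new TypedValue.copy assumes arena allocation: nothing is freed on failure, and both the type and the value are duplicated recursively. A minimal usage sketch under that assumption; copyTypedValue and tv are illustrative names, not part of this patch:

    const std = @import("std");
    const TypedValue = @import("TypedValue.zig");

    /// Duplicate a TypedValue into a caller-owned arena so it can outlive the
    /// ZIR memory it was originally built in.
    fn copyTypedValue(arena: *std.heap.ArenaAllocator, tv: TypedValue) error{OutOfMemory}!TypedValue {
        return tv.copy(&arena.allocator);
    }
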
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index a3a15b463e..7473ccc431 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -10,6 +10,7 @@ const Module = @import("Module.zig");
const ErrorMsg = Module.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
+const trace = @import("tracy.zig").trace;
pub const Result = union(enum) {
/// The `code` parameter passed to `generateSymbol` has the value appended.
@@ -29,6 +30,9 @@ pub fn generateSymbol(
/// A Decl that this symbol depends on had a semantic analysis failure.
AnalysisFail,
}!Result {
+ const tracy = trace(@src());
+ defer tracy.end();
+
switch (typed_value.ty.zigTypeTag()) {
.Fn => {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
@@ -178,6 +182,7 @@ const Function = struct {
.ptrtoint => return self.genPtrToInt(inst.cast(ir.Inst.PtrToInt).?),
.bitcast => return self.genBitCast(inst.cast(ir.Inst.BitCast).?),
.ret => return self.genRet(inst.cast(ir.Inst.Ret).?),
+ .retvoid => return self.genRetVoid(inst.cast(ir.Inst.RetVoid).?),
.cmp => return self.genCmp(inst.cast(ir.Inst.Cmp).?),
.condbr => return self.genCondBr(inst.cast(ir.Inst.CondBr).?),
.isnull => return self.genIsNull(inst.cast(ir.Inst.IsNull).?),
@@ -213,7 +218,7 @@ const Function = struct {
try self.code.resize(self.code.items.len + 7);
self.code.items[self.code.items.len - 7 ..][0..3].* = [3]u8{ 0xff, 0x14, 0x25 };
mem.writeIntLittle(u32, self.code.items[self.code.items.len - 4 ..][0..4], got_addr);
- const return_type = func.fn_type.fnReturnType();
+ const return_type = func.owner_decl.typed_value.most_recent.typed_value.ty.fnReturnType();
switch (return_type.zigTypeTag()) {
.Void => return MCValue{ .none = {} },
.NoReturn => return MCValue{ .unreach = {} },
@@ -230,16 +235,28 @@ const Function = struct {
}
}
- fn genRet(self: *Function, inst: *ir.Inst.Ret) !MCValue {
+ fn ret(self: *Function, src: usize, mcv: MCValue) !MCValue {
+ if (mcv != .none) {
+ return self.fail(src, "TODO implement return with non-void operand", .{});
+ }
switch (self.target.cpu.arch) {
.i386, .x86_64 => {
try self.code.append(0xc3); // ret
},
- else => return self.fail(inst.base.src, "TODO implement return for {}", .{self.target.cpu.arch}),
+ else => return self.fail(src, "TODO implement return for {}", .{self.target.cpu.arch}),
}
return .unreach;
}
+ fn genRet(self: *Function, inst: *ir.Inst.Ret) !MCValue {
+ const operand = try self.resolveInst(inst.args.operand);
+ return self.ret(inst.base.src, operand);
+ }
+
+ fn genRetVoid(self: *Function, inst: *ir.Inst.RetVoid) !MCValue {
+ return self.ret(inst.base.src, .none);
+ }
+
fn genCmp(self: *Function, inst: *ir.Inst.Cmp) !MCValue {
switch (self.target.cpu.arch) {
else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}),
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 330b1c4135..387c88df3b 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -26,6 +26,7 @@ pub const Inst = struct {
isnull,
ptrtoint,
ret,
+ retvoid,
unreach,
};
@@ -146,6 +147,14 @@ pub const Inst = struct {
pub const Ret = struct {
pub const base_tag = Tag.ret;
base: Inst,
+ args: struct {
+ operand: *Inst,
+ },
+ };
+
+ pub const RetVoid = struct {
+ pub const base_tag = Tag.retvoid;
+ base: Inst,
args: void,
};
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index f394d45864..c6acf21b84 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -369,7 +369,7 @@ pub const ElfFile = struct {
const file_size = self.options.program_code_size_hint;
const p_align = 0x1000;
const off = self.findFreeSpace(file_size, p_align);
- //std.debug.warn("found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ //std.log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
try self.program_headers.append(self.allocator, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
@@ -390,7 +390,7 @@ pub const ElfFile = struct {
// page align.
const p_align = 0x1000;
const off = self.findFreeSpace(file_size, p_align);
- //std.debug.warn("found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ //std.log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
// TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at.
// we'll need to re-use that function anyway, in case the GOT grows and overlaps something
// else in virtual memory.
@@ -412,7 +412,7 @@ pub const ElfFile = struct {
assert(self.shstrtab.items.len == 0);
try self.shstrtab.append(self.allocator, 0); // need a 0 at position 0
const off = self.findFreeSpace(self.shstrtab.items.len, 1);
- //std.debug.warn("found shstrtab free space 0x{x} to 0x{x}\n", .{ off, off + self.shstrtab.items.len });
+ //std.log.debug(.link, "found shstrtab free space 0x{x} to 0x{x}\n", .{ off, off + self.shstrtab.items.len });
try self.sections.append(self.allocator, .{
.sh_name = try self.makeString(".shstrtab"),
.sh_type = elf.SHT_STRTAB,
@@ -470,7 +470,7 @@ pub const ElfFile = struct {
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
const file_size = self.options.symbol_count_hint * each_size;
const off = self.findFreeSpace(file_size, min_align);
- //std.debug.warn("found symtab free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ //std.log.debug(.link, "found symtab free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
try self.sections.append(self.allocator, .{
.sh_name = try self.makeString(".symtab"),
@@ -586,7 +586,7 @@ pub const ElfFile = struct {
shstrtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
}
shstrtab_sect.sh_size = needed_size;
- //std.debug.warn("shstrtab start=0x{x} end=0x{x}\n", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });
+ //std.log.debug(.link, "shstrtab start=0x{x} end=0x{x}\n", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });
try self.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
if (!self.shdr_table_dirty) {
@@ -632,7 +632,7 @@ pub const ElfFile = struct {
for (buf) |*shdr, i| {
shdr.* = self.sections.items[i];
- //std.debug.warn("writing section {}\n", .{shdr.*});
+ //std.log.debug(.link, "writing section {}\n", .{shdr.*});
if (foreign_endian) {
bswapAllFields(elf.Elf64_Shdr, shdr);
}
@@ -956,10 +956,10 @@ pub const ElfFile = struct {
try self.offset_table_free_list.ensureCapacity(self.allocator, self.local_symbols.items.len);
if (self.local_symbol_free_list.popOrNull()) |i| {
- //std.debug.warn("reusing symbol index {} for {}\n", .{i, decl.name});
+ //std.log.debug(.link, "reusing symbol index {} for {}\n", .{i, decl.name});
decl.link.local_sym_index = i;
} else {
- //std.debug.warn("allocating symbol index {} for {}\n", .{self.local_symbols.items.len, decl.name});
+ //std.log.debug(.link, "allocating symbol index {} for {}\n", .{self.local_symbols.items.len, decl.name});
decl.link.local_sym_index = @intCast(u32, self.local_symbols.items.len);
_ = self.local_symbols.addOneAssumeCapacity();
}
@@ -1002,7 +1002,7 @@ pub const ElfFile = struct {
defer code_buffer.deinit();
const typed_value = decl.typed_value.most_recent.typed_value;
- const code = switch (try codegen.generateSymbol(self, decl.src, typed_value, &code_buffer)) {
+ const code = switch (try codegen.generateSymbol(self, decl.src(), typed_value, &code_buffer)) {
.externally_managed => |x| x,
.appended => code_buffer.items,
.fail => |em| {
@@ -1027,11 +1027,11 @@ pub const ElfFile = struct {
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
if (need_realloc) {
const vaddr = try self.growTextBlock(&decl.link, code.len, required_alignment);
- //std.debug.warn("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
+ //std.log.debug(.link, "growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
if (vaddr != local_sym.st_value) {
local_sym.st_value = vaddr;
- //std.debug.warn(" (writing new offset table entry)\n", .{});
+ //std.log.debug(.link, " (writing new offset table entry)\n", .{});
self.offset_table.items[decl.link.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.offset_table_index);
}
@@ -1049,7 +1049,7 @@ pub const ElfFile = struct {
const decl_name = mem.spanZ(decl.name);
const name_str_index = try self.makeString(decl_name);
const vaddr = try self.allocateTextBlock(&decl.link, code.len, required_alignment);
- //std.debug.warn("allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
+ //std.log.debug(.link, "allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
errdefer self.freeTextBlock(&decl.link);
local_sym.* = .{
@@ -1307,7 +1307,6 @@ pub const ElfFile = struct {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
};
- //std.debug.warn("symtab start=0x{x} end=0x{x}\n", .{ syms_sect.sh_offset, syms_sect.sh_offset + needed_size });
const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
const global_syms_off = syms_sect.sh_offset + self.local_symbols.items.len * sym_size;
switch (self.ptr_width) {
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 085e644d7f..3743b4f334 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -38,6 +38,29 @@ const usage =
\\
;
+pub fn log(
+ comptime level: std.log.Level,
+ comptime scope: @TypeOf(.EnumLiteral),
+ comptime format: []const u8,
+ args: var,
+) void {
+ if (@enumToInt(level) > @enumToInt(std.log.level))
+ return;
+
+ const scope_prefix = "(" ++ switch (scope) {
+ // Uncomment to hide logs
+ //.compiler,
+ .link => return,
+
+ else => @tagName(scope),
+ } ++ "): ";
+
+ const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;
+
+ // Print the message to stderr, silently ignoring any errors
+ std.debug.print(prefix ++ format, args);
+}
+
pub fn main() !void {
// TODO general purpose allocator in the zig std lib
const gpa = if (std.builtin.link_libc) std.heap.c_allocator else std.heap.page_allocator;
@@ -86,7 +109,7 @@ const usage_build_generic =
\\ zig build-obj <options> [files]
\\
\\Supported file types:
- \\ (planned) .zig Zig source code
+ \\ .zig Zig source code
\\ .zir Zig Intermediate Representation code
\\ (planned) .o ELF object file
\\ (planned) .o MACH-O (macOS) object file
@@ -407,21 +430,7 @@ fn buildOutputType(
std.debug.warn("-fno-emit-bin not supported yet", .{});
process.exit(1);
},
- .yes_default_path => switch (output_mode) {
- .Exe => try std.fmt.allocPrint(arena, "{}{}", .{ root_name, target_info.target.exeFileExt() }),
- .Lib => blk: {
- const suffix = switch (link_mode orelse .Static) {
- .Static => target_info.target.staticLibSuffix(),
- .Dynamic => target_info.target.dynamicLibSuffix(),
- };
- break :blk try std.fmt.allocPrint(arena, "{}{}{}", .{
- target_info.target.libPrefix(),
- root_name,
- suffix,
- });
- },
- .Obj => try std.fmt.allocPrint(arena, "{}{}", .{ root_name, target_info.target.oFileExt() }),
- },
+ .yes_default_path => try std.zig.binNameAlloc(arena, root_name, target_info.target, output_mode, link_mode),
.yes => |p| p,
};
@@ -450,6 +459,7 @@ fn buildOutputType(
.link_mode = link_mode,
.object_format = object_format,
.optimize_mode = build_mode,
+ .keep_source_files_loaded = zir_out_path != null,
});
defer module.deinit();
@@ -487,7 +497,9 @@ fn buildOutputType(
}
fn updateModule(gpa: *Allocator, module: *Module, zir_out_path: ?[]const u8) !void {
+ var timer = try std.time.Timer.start();
try module.update();
+ const update_nanos = timer.read();
var errors = try module.getAllErrorsAlloc();
defer errors.deinit(module.allocator);
@@ -501,6 +513,8 @@ fn updateModule(gpa: *Allocator, module: *Module, zir_out_path: ?[]const u8) !vo
full_err_msg.msg,
});
}
+ } else {
+ std.log.info(.compiler, "Update completed in {} ms\n", .{update_nanos / std.time.ns_per_ms});
}
if (zir_out_path) |zop| {
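
A note on the root-level log override added to main.zig above: std.log routes every call through this function, so the scope switch decides which subsystems get printed and how they are prefixed. The calls below are hypothetical; only the formatting and filtering behaviour follows from the override itself:

    const std = @import("std");

    pub fn demo() void {
        // Printed to stderr as "[info] (compiler): update took 12 ms".
        std.log.info(.compiler, "update took {} ms", .{12});
        // Never printed: the .link arm of the scope switch returns early.
        std.log.debug(.link, "writing section headers", .{});
    }
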
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
index 4cf72ce481..14804cab68 100644
--- a/src-self-hosted/test.zig
+++ b/src-self-hosted/test.zig
@@ -21,32 +21,7 @@ const ErrorMsg = struct {
};
pub const TestContext = struct {
- // TODO: remove these. They are deprecated.
- zir_cmp_output_cases: std.ArrayList(ZIRCompareOutputCase),
-
- /// TODO: find a way to treat cases as individual tests (shouldn't show "1 test passed" if there are 200 cases)
- zir_cases: std.ArrayList(ZIRCase),
-
- // TODO: remove
- pub const ZIRCompareOutputCase = struct {
- name: []const u8,
- src_list: []const []const u8,
- expected_stdout_list: []const []const u8,
- };
-
- pub const ZIRUpdateType = enum {
- /// A transformation update transforms the input ZIR and tests against
- /// the expected output
- Transformation,
- /// An error update attempts to compile bad code, and ensures that it
- /// fails to compile, and for the expected reasons
- Error,
- /// An execution update compiles and runs the input ZIR, feeding in
- /// provided input and ensuring that the outputs match what is expected
- Execution,
- /// A compilation update checks that the ZIR compiles without any issues
- Compiles,
- };
+ zir_cases: std.ArrayList(Case),
pub const ZIRUpdate = struct {
/// The input to the current update. We simulate an incremental update
@@ -57,58 +32,55 @@ pub const TestContext = struct {
/// you can keep it mostly consistent, with small changes, testing the
/// effects of the incremental compilation.
src: [:0]const u8,
- case: union(ZIRUpdateType) {
- /// The expected output ZIR
+ case: union(enum) {
+ /// A transformation update transforms the input ZIR and tests against
+ /// the expected output ZIR.
Transformation: [:0]const u8,
+ /// An error update attempts to compile bad code, and ensures that it
+ /// fails to compile, and for the expected reasons.
/// A slice containing the expected errors *in sequential order*.
Error: []const ErrorMsg,
-
- /// Input to feed to the program, and expected outputs.
- ///
- /// If stdout, stderr, and exit_code are all null, addZIRCase will
- /// discard the test. To test for successful compilation, use a
- /// dedicated Compile update instead.
- Execution: struct {
- stdin: ?[]const u8,
- stdout: ?[]const u8,
- stderr: ?[]const u8,
- exit_code: ?u8,
- },
- /// A Compiles test checks only that compilation of the given ZIR
- /// succeeds. To test outputs, use an Execution test. It is good to
- /// use a Compiles test before an Execution, as the overhead should
- /// be low (due to incremental compilation) and TODO: provide a way
- /// to check changed / new / etc decls in testing mode
- /// (usingnamespace a debug info struct with a comptime flag?)
- Compiles: void,
+ /// An execution update compiles and runs the input ZIR, feeding in
+ /// provided input and ensuring that the stdout matches what is expected.
+ Execution: []const u8,
},
};
- /// A ZIRCase consists of a set of *updates*. A update can transform ZIR,
+ /// A Case consists of a set of *updates*. An update can transform ZIR,
/// compile it, ensure that compilation fails, and more. The same Module is
/// used for each update, so each update's source is treated as a single file
/// being updated by the test harness and incrementally compiled.
- pub const ZIRCase = struct {
+ pub const Case = struct {
name: []const u8,
/// The platform the ZIR targets. For non-native platforms, an emulator
/// such as QEMU is required for tests to complete.
target: std.zig.CrossTarget,
updates: std.ArrayList(ZIRUpdate),
+ output_mode: std.builtin.OutputMode,
+ /// Either ".zir" or ".zig"
+ extension: [4]u8,
/// Adds a subcase in which the module is updated with new ZIR, and the
/// resulting ZIR is validated.
- pub fn addTransform(self: *ZIRCase, src: [:0]const u8, result: [:0]const u8) void {
+ pub fn addTransform(self: *Case, src: [:0]const u8, result: [:0]const u8) void {
self.updates.append(.{
.src = src,
.case = .{ .Transformation = result },
}) catch unreachable;
}
+ pub fn addCompareOutput(self: *Case, src: [:0]const u8, result: []const u8) void {
+ self.updates.append(.{
+ .src = src,
+ .case = .{ .Execution = result },
+ }) catch unreachable;
+ }
+
/// Adds a subcase in which the module is updated with invalid ZIR, and
/// ensures that compilation fails for the expected reasons.
///
/// Errors must be specified in sequential order.
- pub fn addError(self: *ZIRCase, src: [:0]const u8, errors: []const []const u8) void {
+ pub fn addError(self: *Case, src: [:0]const u8, errors: []const []const u8) void {
var array = self.updates.allocator.alloc(ErrorMsg, errors.len) catch unreachable;
for (errors) |e, i| {
if (e[0] != ':') {
@@ -146,15 +118,65 @@ pub const TestContext = struct {
}
};
- pub fn addZIRMulti(
+ pub fn addExeZIR(
+ ctx: *TestContext,
+ name: []const u8,
+ target: std.zig.CrossTarget,
+ ) *Case {
+ const case = Case{
+ .name = name,
+ .target = target,
+ .updates = std.ArrayList(ZIRUpdate).init(ctx.zir_cases.allocator),
+ .output_mode = .Exe,
+ .extension = ".zir".*,
+ };
+ ctx.zir_cases.append(case) catch unreachable;
+ return &ctx.zir_cases.items[ctx.zir_cases.items.len - 1];
+ }
+
+ pub fn addObjZIR(
ctx: *TestContext,
name: []const u8,
target: std.zig.CrossTarget,
- ) *ZIRCase {
- const case = ZIRCase{
+ ) *Case {
+ const case = Case{
.name = name,
.target = target,
.updates = std.ArrayList(ZIRUpdate).init(ctx.zir_cases.allocator),
+ .output_mode = .Obj,
+ .extension = ".zir".*,
+ };
+ ctx.zir_cases.append(case) catch unreachable;
+ return &ctx.zir_cases.items[ctx.zir_cases.items.len - 1];
+ }
+
+ pub fn addExe(
+ ctx: *TestContext,
+ name: []const u8,
+ target: std.zig.CrossTarget,
+ ) *Case {
+ const case = Case{
+ .name = name,
+ .target = target,
+ .updates = std.ArrayList(ZIRUpdate).init(ctx.zir_cases.allocator),
+ .output_mode = .Exe,
+ .extension = ".zig".*,
+ };
+ ctx.zir_cases.append(case) catch unreachable;
+ return &ctx.zir_cases.items[ctx.zir_cases.items.len - 1];
+ }
+
+ pub fn addObj(
+ ctx: *TestContext,
+ name: []const u8,
+ target: std.zig.CrossTarget,
+ ) *Case {
+ const case = Case{
+ .name = name,
+ .target = target,
+ .updates = std.ArrayList(ZIRUpdate).init(ctx.zir_cases.allocator),
+ .output_mode = .Obj,
+ .extension = ".zig".*,
};
ctx.zir_cases.append(case) catch unreachable;
return &ctx.zir_cases.items[ctx.zir_cases.items.len - 1];
@@ -163,14 +185,21 @@ pub const TestContext = struct {
pub fn addZIRCompareOutput(
ctx: *TestContext,
name: []const u8,
- src_list: []const []const u8,
- expected_stdout_list: []const []const u8,
+ src: [:0]const u8,
+ expected_stdout: []const u8,
) void {
- ctx.zir_cmp_output_cases.append(.{
- .name = name,
- .src_list = src_list,
- .expected_stdout_list = expected_stdout_list,
- }) catch unreachable;
+ var c = ctx.addExeZIR(name, .{});
+ c.addCompareOutput(src, expected_stdout);
+ }
+
+ pub fn addCompareOutput(
+ ctx: *TestContext,
+ name: []const u8,
+ src: [:0]const u8,
+ expected_stdout: []const u8,
+ ) void {
+ var c = ctx.addExe(name, .{});
+ c.addCompareOutput(src, expected_stdout);
}
pub fn addZIRTransform(
@@ -180,7 +209,7 @@ pub const TestContext = struct {
src: [:0]const u8,
result: [:0]const u8,
) void {
- var c = ctx.addZIRMulti(name, target);
+ var c = ctx.addObjZIR(name, target);
c.addTransform(src, result);
}
@@ -191,20 +220,18 @@ pub const TestContext = struct {
src: [:0]const u8,
expected_errors: []const []const u8,
) void {
- var c = ctx.addZIRMulti(name, target);
+ var c = ctx.addObjZIR(name, target);
c.addError(src, expected_errors);
}
fn init() TestContext {
const allocator = std.heap.page_allocator;
return .{
- .zir_cmp_output_cases = std.ArrayList(ZIRCompareOutputCase).init(allocator),
- .zir_cases = std.ArrayList(ZIRCase).init(allocator),
+ .zir_cases = std.ArrayList(Case).init(allocator),
};
}
fn deinit(self: *TestContext) void {
- self.zir_cmp_output_cases.deinit();
for (self.zir_cases.items) |c| {
for (c.updates.items) |u| {
if (u.case == .Error) {
@@ -226,30 +253,32 @@ pub const TestContext = struct {
for (self.zir_cases.items) |case| {
std.testing.base_allocator_instance.reset();
- const info = try std.zig.system.NativeTargetInfo.detect(std.testing.allocator, case.target);
- try self.runOneZIRCase(std.testing.allocator, root_node, case, info.target);
- try std.testing.allocator_instance.validate();
- }
- // TODO: wipe the rest of this function
- for (self.zir_cmp_output_cases.items) |case| {
- std.testing.base_allocator_instance.reset();
- try self.runOneZIRCmpOutputCase(std.testing.allocator, root_node, case, native_info.target);
+ var prg_node = root_node.start(case.name, case.updates.items.len);
+ prg_node.activate();
+ defer prg_node.end();
+
+ // So that we can see which test case failed when the leak checker goes off.
+ progress.refresh();
+
+ const info = try std.zig.system.NativeTargetInfo.detect(std.testing.allocator, case.target);
+ try self.runOneCase(std.testing.allocator, &prg_node, case, info.target);
try std.testing.allocator_instance.validate();
}
}
- fn runOneZIRCase(self: *TestContext, allocator: *Allocator, root_node: *std.Progress.Node, case: ZIRCase, target: std.Target) !void {
+ fn runOneCase(self: *TestContext, allocator: *Allocator, prg_node: *std.Progress.Node, case: Case, target: std.Target) !void {
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
- const tmp_src_path = "test_case.zir";
+ const root_name = "test_case";
+ const tmp_src_path = try std.fmt.allocPrint(allocator, "{}{}", .{ root_name, case.extension });
+ defer allocator.free(tmp_src_path);
const root_pkg = try Package.create(allocator, tmp.dir, ".", tmp_src_path);
defer root_pkg.destroy();
- var prg_node = root_node.start(case.name, case.updates.items.len);
- prg_node.activate();
- defer prg_node.end();
+ const bin_name = try std.zig.binNameAlloc(allocator, root_name, target, case.output_mode, null);
+ defer allocator.free(bin_name);
var module = try Module.init(allocator, .{
.target = target,
@@ -259,16 +288,17 @@ pub const TestContext = struct {
// TODO: support tests for object file building, and library builds
// and linking. This will require a rework to support multi-file
// tests.
- .output_mode = .Obj,
+ .output_mode = case.output_mode,
// TODO: support testing optimizations
.optimize_mode = .Debug,
.bin_file_dir = tmp.dir,
- .bin_file_path = "test_case.o",
+ .bin_file_path = bin_name,
.root_pkg = root_pkg,
+ .keep_source_files_loaded = true,
});
defer module.deinit();
- for (case.updates.items) |update| {
+ for (case.updates.items) |update, update_index| {
var update_node = prg_node.start("update", 4);
update_node.activate();
defer update_node.end();
@@ -280,6 +310,7 @@ pub const TestContext = struct {
var module_node = update_node.start("parse/analysis/codegen", null);
module_node.activate();
+ try module.makeBinFileWritable();
try module.update();
module_node.end();
@@ -328,82 +359,41 @@ pub const TestContext = struct {
}
}
},
-
- else => return error.unimplemented,
- }
- }
- }
-
- fn runOneZIRCmpOutputCase(
- self: *TestContext,
- allocator: *Allocator,
- root_node: *std.Progress.Node,
- case: ZIRCompareOutputCase,
- target: std.Target,
- ) !void {
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- const tmp_src_path = "test-case.zir";
- const root_pkg = try Package.create(allocator, tmp.dir, ".", tmp_src_path);
- defer root_pkg.destroy();
-
- var prg_node = root_node.start(case.name, case.src_list.len);
- prg_node.activate();
- defer prg_node.end();
-
- var module = try Module.init(allocator, .{
- .target = target,
- .output_mode = .Exe,
- .optimize_mode = .Debug,
- .bin_file_dir = tmp.dir,
- .bin_file_path = "a.out",
- .root_pkg = root_pkg,
- });
- defer module.deinit();
-
- for (case.src_list) |source, i| {
- var src_node = prg_node.start("update", 2);
- src_node.activate();
- defer src_node.end();
-
- try tmp.dir.writeFile(tmp_src_path, source);
-
- var update_node = src_node.start("parse,analysis,codegen", null);
- update_node.activate();
- try module.makeBinFileWritable();
- try module.update();
- update_node.end();
-
- var exec_result = x: {
- var exec_node = src_node.start("execute", null);
- exec_node.activate();
- defer exec_node.end();
-
- try module.makeBinFileExecutable();
- break :x try std.ChildProcess.exec(.{
- .allocator = allocator,
- .argv = &[_][]const u8{"./a.out"},
- .cwd_dir = tmp.dir,
- });
- };
- defer allocator.free(exec_result.stdout);
- defer allocator.free(exec_result.stderr);
- switch (exec_result.term) {
- .Exited => |code| {
- if (code != 0) {
- std.debug.warn("elf file exited with code {}\n", .{code});
- return error.BinaryBadExitCode;
+ .Execution => |expected_stdout| {
+ var exec_result = x: {
+ var exec_node = update_node.start("execute", null);
+ exec_node.activate();
+ defer exec_node.end();
+
+ try module.makeBinFileExecutable();
+
+ const exe_path = try std.fmt.allocPrint(allocator, "." ++ std.fs.path.sep_str ++ "{}", .{bin_name});
+ defer allocator.free(exe_path);
+
+ break :x try std.ChildProcess.exec(.{
+ .allocator = allocator,
+ .argv = &[_][]const u8{exe_path},
+ .cwd_dir = tmp.dir,
+ });
+ };
+ defer allocator.free(exec_result.stdout);
+ defer allocator.free(exec_result.stderr);
+ switch (exec_result.term) {
+ .Exited => |code| {
+ if (code != 0) {
+ std.debug.warn("elf file exited with code {}\n", .{code});
+ return error.BinaryBadExitCode;
+ }
+ },
+ else => return error.BinaryCrashed,
+ }
+ if (!std.mem.eql(u8, expected_stdout, exec_result.stdout)) {
+ std.debug.panic(
+ "update index {}, mismatched stdout\n====Expected (len={}):====\n{}\n====Actual (len={}):====\n{}\n========\n",
+ .{ update_index, expected_stdout.len, expected_stdout, exec_result.stdout.len, exec_result.stdout },
+ );
}
},
- else => return error.BinaryCrashed,
- }
- const expected_stdout = case.expected_stdout_list[i];
- if (!std.mem.eql(u8, expected_stdout, exec_result.stdout)) {
- std.debug.panic(
- "update index {}, mismatched stdout\n====Expected (len={}):====\n{}\n====Actual (len={}):====\n{}\n========\n",
- .{ i, expected_stdout.len, expected_stdout, exec_result.stdout.len, exec_result.stdout },
- );
}
}
}
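
For orientation, a sketch of how cases could be registered against the new TestContext methods above (addExeZIR/addObjZIR plus the per-update Case methods they return). Every name, ZIR snippet, and expected string below is a placeholder for illustration, not a value taken from this change, and the sketch assumes it lives next to TestContext in test.zig:

    fn addExampleCases(ctx: *TestContext) void {
        // Exe case: each update pairs a source with the stdout it must produce.
        var exe = ctx.addExeZIR("placeholder exe case", .{});
        exe.addCompareOutput(
            \\@void = primitive(void)
        , "");

        // Obj case: expected compile errors, each starting with ':' as addError requires.
        var obj = ctx.addObjZIR("placeholder error case", .{});
        obj.addError(
            \\@foo = declref("bar")
        , &[_][]const u8{":1:1: error: placeholder message"});
    }
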
diff --git a/src-self-hosted/tracy.zig b/src-self-hosted/tracy.zig
new file mode 100644
index 0000000000..6f56a87ce6
--- /dev/null
+++ b/src-self-hosted/tracy.zig
@@ -0,0 +1,45 @@
+pub const std = @import("std");
+
+pub const enable = if (std.builtin.is_test) false else @import("build_options").enable_tracy;
+
+extern fn ___tracy_emit_zone_begin_callstack(
+ srcloc: *const ___tracy_source_location_data,
+ depth: c_int,
+ active: c_int,
+) ___tracy_c_zone_context;
+
+extern fn ___tracy_emit_zone_end(ctx: ___tracy_c_zone_context) void;
+
+pub const ___tracy_source_location_data = extern struct {
+ name: ?[*:0]const u8,
+ function: [*:0]const u8,
+ file: [*:0]const u8,
+ line: u32,
+ color: u32,
+};
+
+pub const ___tracy_c_zone_context = extern struct {
+ id: u32,
+ active: c_int,
+
+ pub fn end(self: ___tracy_c_zone_context) void {
+ ___tracy_emit_zone_end(self);
+ }
+};
+
+pub const Ctx = if (enable) ___tracy_c_zone_context else struct {
+ pub fn end(self: Ctx) void {}
+};
+
+pub inline fn trace(comptime src: std.builtin.SourceLocation) Ctx {
+ if (!enable) return .{};
+
+ const loc: ___tracy_source_location_data = .{
+ .name = null,
+ .function = src.fn_name.ptr,
+ .file = src.file.ptr,
+ .line = src.line,
+ .color = 0,
+ };
+ return ___tracy_emit_zone_begin_callstack(&loc, 1, 1);
+}
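
The Ctx stub above keeps instrumented call sites zero-cost when enable_tracy is off. The call-site pattern this file implies, sketched here with a placeholder function name:

    const trace = @import("tracy.zig").trace;

    fn doExpensiveWork() void {
        // Opens a Tracy zone for this scope when tracing is enabled; a no-op otherwise.
        const tracy = trace(@src());
        defer tracy.end();

        // ... work to be profiled ...
    }
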
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index aa8c000095..fb97186648 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -54,6 +54,7 @@ pub const Type = extern union {
.@"undefined" => return .Undefined,
.fn_noreturn_no_args => return .Fn,
+ .fn_void_no_args => return .Fn,
.fn_naked_noreturn_no_args => return .Fn,
.fn_ccc_void_no_args => return .Fn,
@@ -112,6 +113,12 @@ pub const Type = extern union {
.Undefined => return true,
.Null => return true,
.Pointer => {
+ // Hot path for common case:
+ if (a.cast(Payload.SingleConstPointer)) |a_payload| {
+ if (b.cast(Payload.SingleConstPointer)) |b_payload| {
+ return eql(a_payload.pointee_type, b_payload.pointee_type);
+ }
+ }
const is_slice_a = isSlice(a);
const is_slice_b = isSlice(b);
if (is_slice_a != is_slice_b)
@@ -163,6 +170,77 @@ pub const Type = extern union {
}
}
+ pub fn copy(self: Type, allocator: *Allocator) error{OutOfMemory}!Type {
+ if (self.tag_if_small_enough < Tag.no_payload_count) {
+ return Type{ .tag_if_small_enough = self.tag_if_small_enough };
+ } else switch (self.ptr_otherwise.tag) {
+ .u8,
+ .i8,
+ .isize,
+ .usize,
+ .c_short,
+ .c_ushort,
+ .c_int,
+ .c_uint,
+ .c_long,
+ .c_ulong,
+ .c_longlong,
+ .c_ulonglong,
+ .c_longdouble,
+ .c_void,
+ .f16,
+ .f32,
+ .f64,
+ .f128,
+ .bool,
+ .void,
+ .type,
+ .anyerror,
+ .comptime_int,
+ .comptime_float,
+ .noreturn,
+ .@"null",
+ .@"undefined",
+ .fn_noreturn_no_args,
+ .fn_void_no_args,
+ .fn_naked_noreturn_no_args,
+ .fn_ccc_void_no_args,
+ .single_const_pointer_to_comptime_int,
+ .const_slice_u8,
+ => unreachable,
+
+ .array_u8_sentinel_0 => return self.copyPayloadShallow(allocator, Payload.Array_u8_Sentinel0),
+ .array => {
+ const payload = @fieldParentPtr(Payload.Array, "base", self.ptr_otherwise);
+ const new_payload = try allocator.create(Payload.Array);
+ new_payload.* = .{
+ .base = payload.base,
+ .len = payload.len,
+ .elem_type = try payload.elem_type.copy(allocator),
+ };
+ return Type{ .ptr_otherwise = &new_payload.base };
+ },
+ .single_const_pointer => {
+ const payload = @fieldParentPtr(Payload.SingleConstPointer, "base", self.ptr_otherwise);
+ const new_payload = try allocator.create(Payload.SingleConstPointer);
+ new_payload.* = .{
+ .base = payload.base,
+ .pointee_type = try payload.pointee_type.copy(allocator),
+ };
+ return Type{ .ptr_otherwise = &new_payload.base };
+ },
+ .int_signed => return self.copyPayloadShallow(allocator, Payload.IntSigned),
+ .int_unsigned => return self.copyPayloadShallow(allocator, Payload.IntUnsigned),
+ }
+ }
+
+ fn copyPayloadShallow(self: Type, allocator: *Allocator, comptime T: type) error{OutOfMemory}!Type {
+ const payload = @fieldParentPtr(T, "base", self.ptr_otherwise);
+ const new_payload = try allocator.create(T);
+ new_payload.* = payload.*;
+ return Type{ .ptr_otherwise = &new_payload.base };
+ }
+
pub fn format(
self: Type,
comptime fmt: []const u8,
@@ -206,6 +284,7 @@ pub const Type = extern union {
.const_slice_u8 => return out_stream.writeAll("[]const u8"),
.fn_noreturn_no_args => return out_stream.writeAll("fn() noreturn"),
+ .fn_void_no_args => return out_stream.writeAll("fn() void"),
.fn_naked_noreturn_no_args => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
.fn_ccc_void_no_args => return out_stream.writeAll("fn() callconv(.C) void"),
.single_const_pointer_to_comptime_int => return out_stream.writeAll("*const comptime_int"),
@@ -269,6 +348,7 @@ pub const Type = extern union {
.@"null" => return Value.initTag(.null_type),
.@"undefined" => return Value.initTag(.undefined_type),
.fn_noreturn_no_args => return Value.initTag(.fn_noreturn_no_args_type),
+ .fn_void_no_args => return Value.initTag(.fn_void_no_args_type),
.fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type),
.fn_ccc_void_no_args => return Value.initTag(.fn_ccc_void_no_args_type),
.single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type),
@@ -303,6 +383,7 @@ pub const Type = extern union {
.bool,
.anyerror,
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.single_const_pointer_to_comptime_int,
@@ -333,6 +414,7 @@ pub const Type = extern union {
.i8,
.bool,
.fn_noreturn_no_args, // represents machine code; not a pointer
+ .fn_void_no_args, // represents machine code; not a pointer
.fn_naked_noreturn_no_args, // represents machine code; not a pointer
.fn_ccc_void_no_args, // represents machine code; not a pointer
.array_u8_sentinel_0,
@@ -420,6 +502,7 @@ pub const Type = extern union {
.array_u8_sentinel_0,
.const_slice_u8,
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.int_unsigned,
@@ -466,6 +549,7 @@ pub const Type = extern union {
.single_const_pointer,
.single_const_pointer_to_comptime_int,
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.int_unsigned,
@@ -509,6 +593,7 @@ pub const Type = extern union {
.array,
.array_u8_sentinel_0,
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.int_unsigned,
@@ -553,6 +638,7 @@ pub const Type = extern union {
.@"null",
.@"undefined",
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.int_unsigned,
@@ -597,6 +683,7 @@ pub const Type = extern union {
.@"null",
.@"undefined",
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.single_const_pointer,
@@ -642,6 +729,7 @@ pub const Type = extern union {
.@"null",
.@"undefined",
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.single_const_pointer,
@@ -675,6 +763,7 @@ pub const Type = extern union {
.@"null",
.@"undefined",
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.array,
@@ -721,6 +810,7 @@ pub const Type = extern union {
.@"null",
.@"undefined",
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.array,
@@ -777,6 +867,7 @@ pub const Type = extern union {
pub fn fnParamLen(self: Type) usize {
return switch (self.tag()) {
.fn_noreturn_no_args => 0,
+ .fn_void_no_args => 0,
.fn_naked_noreturn_no_args => 0,
.fn_ccc_void_no_args => 0,
@@ -823,6 +914,7 @@ pub const Type = extern union {
pub fn fnParamTypes(self: Type, types: []Type) void {
switch (self.tag()) {
.fn_noreturn_no_args => return,
+ .fn_void_no_args => return,
.fn_naked_noreturn_no_args => return,
.fn_ccc_void_no_args => return,
@@ -869,7 +961,10 @@ pub const Type = extern union {
return switch (self.tag()) {
.fn_noreturn_no_args => Type.initTag(.noreturn),
.fn_naked_noreturn_no_args => Type.initTag(.noreturn),
- .fn_ccc_void_no_args => Type.initTag(.void),
+
+ .fn_void_no_args,
+ .fn_ccc_void_no_args,
+ => Type.initTag(.void),
.f16,
.f32,
@@ -913,6 +1008,7 @@ pub const Type = extern union {
pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention {
return switch (self.tag()) {
.fn_noreturn_no_args => .Unspecified,
+ .fn_void_no_args => .Unspecified,
.fn_naked_noreturn_no_args => .Naked,
.fn_ccc_void_no_args => .C,
@@ -958,6 +1054,7 @@ pub const Type = extern union {
pub fn fnIsVarArgs(self: Type) bool {
return switch (self.tag()) {
.fn_noreturn_no_args => false,
+ .fn_void_no_args => false,
.fn_naked_noreturn_no_args => false,
.fn_ccc_void_no_args => false,
@@ -1033,6 +1130,7 @@ pub const Type = extern union {
.@"null",
.@"undefined",
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.array,
@@ -1070,6 +1168,7 @@ pub const Type = extern union {
.type,
.anyerror,
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.single_const_pointer_to_comptime_int,
@@ -1126,6 +1225,7 @@ pub const Type = extern union {
.type,
.anyerror,
.fn_noreturn_no_args,
+ .fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.single_const_pointer_to_comptime_int,
@@ -1180,6 +1280,7 @@ pub const Type = extern union {
@"null",
@"undefined",
fn_noreturn_no_args,
+ fn_void_no_args,
fn_naked_noreturn_no_args,
fn_ccc_void_no_args,
single_const_pointer_to_comptime_int,
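
Taken together, the type.zig additions make the new fn_void_no_args tag answer every query like `fn() void`. A hedged test sketch of that behavior (the test name and file layout are assumptions):

    const std = @import("std");
    const Type = @import("type.zig").Type;

    test "fn_void_no_args behaves like fn() void" {
        const ty = Type.initTag(.fn_void_no_args);
        std.testing.expect(ty.zigTypeTag() == .Fn);
        std.testing.expect(ty.fnParamLen() == 0);
        std.testing.expect(ty.fnReturnType().zigTypeTag() == .Void);
        std.testing.expect(ty.fnCallingConvention() == .Unspecified);
        std.testing.expect(!ty.fnIsVarArgs());

        // copy() of a payload-free tag is a plain value copy; nothing is allocated.
        const dup = try ty.copy(std.testing.allocator);
        std.testing.expect(dup.zigTypeTag() == .Fn);
    }
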
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
index 5660cd760f..fc5854c40f 100644
--- a/src-self-hosted/value.zig
+++ b/src-self-hosted/value.zig
@@ -49,6 +49,7 @@ pub const Value = extern union {
null_type,
undefined_type,
fn_noreturn_no_args_type,
+ fn_void_no_args_type,
fn_naked_noreturn_no_args_type,
fn_ccc_void_no_args_type,
single_const_pointer_to_comptime_int_type,
@@ -78,8 +79,8 @@ pub const Value = extern union {
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
};
- pub fn initTag(comptime small_tag: Tag) Value {
- comptime assert(@enumToInt(small_tag) < Tag.no_payload_count);
+ pub fn initTag(small_tag: Tag) Value {
+ assert(@enumToInt(small_tag) < Tag.no_payload_count);
return .{ .tag_if_small_enough = @enumToInt(small_tag) };
}
@@ -107,6 +108,109 @@ pub const Value = extern union {
return @fieldParentPtr(T, "base", self.ptr_otherwise);
}
+ pub fn copy(self: Value, allocator: *Allocator) error{OutOfMemory}!Value {
+ if (self.tag_if_small_enough < Tag.no_payload_count) {
+ return Value{ .tag_if_small_enough = self.tag_if_small_enough };
+ } else switch (self.ptr_otherwise.tag) {
+ .u8_type,
+ .i8_type,
+ .isize_type,
+ .usize_type,
+ .c_short_type,
+ .c_ushort_type,
+ .c_int_type,
+ .c_uint_type,
+ .c_long_type,
+ .c_ulong_type,
+ .c_longlong_type,
+ .c_ulonglong_type,
+ .c_longdouble_type,
+ .f16_type,
+ .f32_type,
+ .f64_type,
+ .f128_type,
+ .c_void_type,
+ .bool_type,
+ .void_type,
+ .type_type,
+ .anyerror_type,
+ .comptime_int_type,
+ .comptime_float_type,
+ .noreturn_type,
+ .null_type,
+ .undefined_type,
+ .fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
+ .fn_naked_noreturn_no_args_type,
+ .fn_ccc_void_no_args_type,
+ .single_const_pointer_to_comptime_int_type,
+ .const_slice_u8_type,
+ .undef,
+ .zero,
+ .the_one_possible_value,
+ .null_value,
+ .bool_true,
+ .bool_false,
+ => unreachable,
+
+ .ty => {
+ const payload = @fieldParentPtr(Payload.Ty, "base", self.ptr_otherwise);
+ const new_payload = try allocator.create(Payload.Ty);
+ new_payload.* = .{
+ .base = payload.base,
+ .ty = try payload.ty.copy(allocator),
+ };
+ return Value{ .ptr_otherwise = &new_payload.base };
+ },
+ .int_u64 => return self.copyPayloadShallow(allocator, Payload.Int_u64),
+ .int_i64 => return self.copyPayloadShallow(allocator, Payload.Int_i64),
+ .int_big_positive => {
+ @panic("TODO implement copying of big ints");
+ },
+ .int_big_negative => {
+ @panic("TODO implement copying of big ints");
+ },
+ .function => return self.copyPayloadShallow(allocator, Payload.Function),
+ .ref_val => {
+ const payload = @fieldParentPtr(Payload.RefVal, "base", self.ptr_otherwise);
+ const new_payload = try allocator.create(Payload.RefVal);
+ new_payload.* = .{
+ .base = payload.base,
+ .val = try payload.val.copy(allocator),
+ };
+ return Value{ .ptr_otherwise = &new_payload.base };
+ },
+ .decl_ref => return self.copyPayloadShallow(allocator, Payload.DeclRef),
+ .elem_ptr => {
+ const payload = @fieldParentPtr(Payload.ElemPtr, "base", self.ptr_otherwise);
+ const new_payload = try allocator.create(Payload.ElemPtr);
+ new_payload.* = .{
+ .base = payload.base,
+ .array_ptr = try payload.array_ptr.copy(allocator),
+ .index = payload.index,
+ };
+ return Value{ .ptr_otherwise = &new_payload.base };
+ },
+ .bytes => return self.copyPayloadShallow(allocator, Payload.Bytes),
+ .repeated => {
+ const payload = @fieldParentPtr(Payload.Repeated, "base", self.ptr_otherwise);
+ const new_payload = try allocator.create(Payload.Repeated);
+ new_payload.* = .{
+ .base = payload.base,
+ .val = try payload.val.copy(allocator),
+ };
+ return Value{ .ptr_otherwise = &new_payload.base };
+ },
+ }
+ }
+
+ fn copyPayloadShallow(self: Value, allocator: *Allocator, comptime T: type) error{OutOfMemory}!Value {
+ const payload = @fieldParentPtr(T, "base", self.ptr_otherwise);
+ const new_payload = try allocator.create(T);
+ new_payload.* = payload.*;
+ return Value{ .ptr_otherwise = &new_payload.base };
+ }
+
pub fn format(
self: Value,
comptime fmt: []const u8,
@@ -144,6 +248,7 @@ pub const Value = extern union {
.null_type => return out_stream.writeAll("@TypeOf(null)"),
.undefined_type => return out_stream.writeAll("@TypeOf(undefined)"),
.fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"),
+ .fn_void_no_args_type => return out_stream.writeAll("fn() void"),
.fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
.fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"),
.single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
@@ -229,6 +334,7 @@ pub const Value = extern union {
.null_type => Type.initTag(.@"null"),
.undefined_type => Type.initTag(.@"undefined"),
.fn_noreturn_no_args_type => Type.initTag(.fn_noreturn_no_args),
+ .fn_void_no_args_type => Type.initTag(.fn_void_no_args),
.fn_naked_noreturn_no_args_type => Type.initTag(.fn_naked_noreturn_no_args),
.fn_ccc_void_no_args_type => Type.initTag(.fn_ccc_void_no_args),
.single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int),
@@ -286,6 +392,7 @@ pub const Value = extern union {
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@@ -345,6 +452,7 @@ pub const Value = extern union {
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@@ -405,6 +513,7 @@ pub const Value = extern union {
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@@ -470,6 +579,7 @@ pub const Value = extern union {
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@@ -564,6 +674,7 @@ pub const Value = extern union {
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@@ -620,6 +731,7 @@ pub const Value = extern union {
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@@ -721,6 +833,7 @@ pub const Value = extern union {
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@@ -783,6 +896,7 @@ pub const Value = extern union {
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@@ -862,6 +976,7 @@ pub const Value = extern union {
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@@ -929,11 +1044,6 @@ pub const Value = extern union {
len: u64,
};
- pub const SingleConstPtrType = struct {
- base: Payload = Payload{ .tag = .single_const_ptr_type },
- elem_type: *Type,
- };
-
/// Represents a pointer to another immutable value.
pub const RefVal = struct {
base: Payload = Payload{ .tag = .ref_val },
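
A short sketch of what the value.zig changes permit: initTag now also accepts a runtime tag, and the new fn_void_no_args_type value maps back to its Type via toType (the test name is an assumption):

    const std = @import("std");
    const Value = @import("value.zig").Value;

    test "fn_void_no_args_type round-trips through toType" {
        // After this change the tag no longer needs to be comptime-known.
        const val = Value.initTag(.fn_void_no_args_type);
        std.testing.expect(val.toType().zigTypeTag() == .Fn);
        std.testing.expect(val.toType().fnReturnType().zigTypeTag() == .Void);
    }
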
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index a00782771d..43c0eac197 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -12,27 +12,43 @@ const TypedValue = @import("TypedValue.zig");
const ir = @import("ir.zig");
const IrModule = @import("Module.zig");
+/// This struct is relevant only for the ZIR Module text format. It is not used for
+/// semantic analysis of Zig source code.
+pub const Decl = struct {
+ name: []const u8,
+
+ /// Hash of slice into the source of the part after the = and before the next instruction.
+ contents_hash: std.zig.SrcHash,
+
+ inst: *Inst,
+};
+
/// These are instructions that correspond to the ZIR text format. See `ir.Inst` for
/// in-memory, analyzed instructions with types and values.
pub const Inst = struct {
tag: Tag,
/// Byte offset into the source.
src: usize,
- name: []const u8,
-
- /// Slice into the source of the part after the = and before the next instruction.
- contents: []const u8 = &[0]u8{},
+ /// Pre-allocated field for mapping ZIR text instructions to post-analysis instructions.
+ analyzed_inst: ?*ir.Inst = null,
/// These names are used directly as the instruction names in the text format.
pub const Tag = enum {
breakpoint,
call,
compileerror,
+ /// Special case, has no textual representation.
+ @"const",
/// Represents a pointer to a global decl by name.
declref,
+ /// Represents a pointer to a global decl by string name.
+ declref_str,
/// The syntax `@foo` is equivalent to `declval("foo")`.
/// declval is equivalent to declref followed by deref.
declval,
+ /// Same as declval but the parameter is a `*Module.Decl` rather than a name.
+ declval_in_module,
+ /// String Literal. Makes an anonymous Decl and then takes a pointer to it.
str,
int,
ptrtoint,
@@ -42,11 +58,11 @@ pub const Inst = struct {
@"asm",
@"unreachable",
@"return",
+ returnvoid,
@"fn",
+ fntype,
@"export",
primitive,
- ref,
- fntype,
intcast,
bitcast,
elemptr,
@@ -62,8 +78,11 @@ pub const Inst = struct {
.breakpoint => Breakpoint,
.call => Call,
.declref => DeclRef,
+ .declref_str => DeclRefStr,
.declval => DeclVal,
+ .declval_in_module => DeclValInModule,
.compileerror => CompileError,
+ .@"const" => Const,
.str => Str,
.int => Int,
.ptrtoint => PtrToInt,
@@ -73,10 +92,10 @@ pub const Inst = struct {
.@"asm" => Asm,
.@"unreachable" => Unreachable,
.@"return" => Return,
+ .returnvoid => ReturnVoid,
.@"fn" => Fn,
.@"export" => Export,
.primitive => Primitive,
- .ref => Ref,
.fntype => FnType,
.intcast => IntCast,
.bitcast => BitCast,
@@ -122,6 +141,16 @@ pub const Inst = struct {
base: Inst,
positionals: struct {
+ name: []const u8,
+ },
+ kw_args: struct {},
+ };
+
+ pub const DeclRefStr = struct {
+ pub const base_tag = Tag.declref_str;
+ base: Inst,
+
+ positionals: struct {
name: *Inst,
},
kw_args: struct {},
@@ -137,6 +166,16 @@ pub const Inst = struct {
kw_args: struct {},
};
+ pub const DeclValInModule = struct {
+ pub const base_tag = Tag.declval_in_module;
+ base: Inst,
+
+ positionals: struct {
+ decl: *IrModule.Decl,
+ },
+ kw_args: struct {},
+ };
+
pub const CompileError = struct {
pub const base_tag = Tag.compileerror;
base: Inst,
@@ -147,6 +186,16 @@ pub const Inst = struct {
kw_args: struct {},
};
+ pub const Const = struct {
+ pub const base_tag = Tag.@"const";
+ base: Inst,
+
+ positionals: struct {
+ typed_value: TypedValue,
+ },
+ kw_args: struct {},
+ };
+
pub const Str = struct {
pub const base_tag = Tag.str;
base: Inst,
@@ -168,6 +217,7 @@ pub const Inst = struct {
};
pub const PtrToInt = struct {
+ pub const builtin_name = "@ptrToInt";
pub const base_tag = Tag.ptrtoint;
base: Inst,
@@ -238,6 +288,16 @@ pub const Inst = struct {
pub const base_tag = Tag.@"return";
base: Inst,
+ positionals: struct {
+ operand: *Inst,
+ },
+ kw_args: struct {},
+ };
+
+ pub const ReturnVoid = struct {
+ pub const base_tag = Tag.returnvoid;
+ base: Inst,
+
positionals: struct {},
kw_args: struct {},
};
@@ -253,23 +313,26 @@ pub const Inst = struct {
kw_args: struct {},
};
- pub const Export = struct {
- pub const base_tag = Tag.@"export";
+ pub const FnType = struct {
+ pub const base_tag = Tag.fntype;
base: Inst,
positionals: struct {
- symbol_name: *Inst,
- value: *Inst,
+ param_types: []*Inst,
+ return_type: *Inst,
+ },
+ kw_args: struct {
+ cc: std.builtin.CallingConvention = .Unspecified,
},
- kw_args: struct {},
};
- pub const Ref = struct {
- pub const base_tag = Tag.ref;
+ pub const Export = struct {
+ pub const base_tag = Tag.@"export";
base: Inst,
positionals: struct {
- operand: *Inst,
+ symbol_name: *Inst,
+ decl_name: []const u8,
},
kw_args: struct {},
};
@@ -348,19 +411,6 @@ pub const Inst = struct {
};
};
- pub const FnType = struct {
- pub const base_tag = Tag.fntype;
- base: Inst,
-
- positionals: struct {
- param_types: []*Inst,
- return_type: *Inst,
- },
- kw_args: struct {
- cc: std.builtin.CallingConvention = .Unspecified,
- },
- };
-
pub const IntCast = struct {
pub const base_tag = Tag.intcast;
base: Inst,
@@ -456,7 +506,7 @@ pub const ErrorMsg = struct {
};
pub const Module = struct {
- decls: []*Inst,
+ decls: []*Decl,
arena: std.heap.ArenaAllocator,
error_msg: ?ErrorMsg = null,
@@ -475,13 +525,33 @@ pub const Module = struct {
self.writeToStream(std.heap.page_allocator, std.io.getStdErr().outStream()) catch {};
}
- const InstPtrTable = std.AutoHashMap(*Inst, struct { inst: *Inst, index: ?usize });
+ const InstPtrTable = std.AutoHashMap(*Inst, struct { inst: *Inst, index: ?usize, name: []const u8 });
+
+ const DeclAndIndex = struct {
+ decl: *Decl,
+ index: usize,
+ };
/// TODO Look into making a table to speed this up.
- pub fn findDecl(self: Module, name: []const u8) ?*Inst {
- for (self.decls) |decl| {
+ pub fn findDecl(self: Module, name: []const u8) ?DeclAndIndex {
+ for (self.decls) |decl, i| {
if (mem.eql(u8, decl.name, name)) {
- return decl;
+ return DeclAndIndex{
+ .decl = decl,
+ .index = i,
+ };
+ }
+ }
+ return null;
+ }
+
+ pub fn findInstDecl(self: Module, inst: *Inst) ?DeclAndIndex {
+ for (self.decls) |decl, i| {
+ if (decl.inst == inst) {
+ return DeclAndIndex{
+ .decl = decl,
+ .index = i,
+ };
}
}
return null;
@@ -497,18 +567,18 @@ pub const Module = struct {
try inst_table.ensureCapacity(self.decls.len);
for (self.decls) |decl, decl_i| {
- try inst_table.putNoClobber(decl, .{ .inst = decl, .index = null });
+ try inst_table.putNoClobber(decl.inst, .{ .inst = decl.inst, .index = null, .name = decl.name });
- if (decl.cast(Inst.Fn)) |fn_inst| {
+ if (decl.inst.cast(Inst.Fn)) |fn_inst| {
for (fn_inst.positionals.body.instructions) |inst, inst_i| {
- try inst_table.putNoClobber(inst, .{ .inst = inst, .index = inst_i });
+ try inst_table.putNoClobber(inst, .{ .inst = inst, .index = inst_i, .name = undefined });
}
}
}
for (self.decls) |decl, i| {
try stream.print("@{} ", .{decl.name});
- try self.writeInstToStream(stream, decl, &inst_table);
+ try self.writeInstToStream(stream, decl.inst, &inst_table);
try stream.writeByte('\n');
}
}
@@ -516,38 +586,41 @@ pub const Module = struct {
fn writeInstToStream(
self: Module,
stream: var,
- decl: *Inst,
+ inst: *Inst,
inst_table: *const InstPtrTable,
) @TypeOf(stream).Error!void {
// TODO I tried implementing this with an inline for loop and hit a compiler bug
- switch (decl.tag) {
- .breakpoint => return self.writeInstToStreamGeneric(stream, .breakpoint, decl, inst_table),
- .call => return self.writeInstToStreamGeneric(stream, .call, decl, inst_table),
- .declref => return self.writeInstToStreamGeneric(stream, .declref, decl, inst_table),
- .declval => return self.writeInstToStreamGeneric(stream, .declval, decl, inst_table),
- .compileerror => return self.writeInstToStreamGeneric(stream, .compileerror, decl, inst_table),
- .str => return self.writeInstToStreamGeneric(stream, .str, decl, inst_table),
- .int => return self.writeInstToStreamGeneric(stream, .int, decl, inst_table),
- .ptrtoint => return self.writeInstToStreamGeneric(stream, .ptrtoint, decl, inst_table),
- .fieldptr => return self.writeInstToStreamGeneric(stream, .fieldptr, decl, inst_table),
- .deref => return self.writeInstToStreamGeneric(stream, .deref, decl, inst_table),
- .as => return self.writeInstToStreamGeneric(stream, .as, decl, inst_table),
- .@"asm" => return self.writeInstToStreamGeneric(stream, .@"asm", decl, inst_table),
- .@"unreachable" => return self.writeInstToStreamGeneric(stream, .@"unreachable", decl, inst_table),
- .@"return" => return self.writeInstToStreamGeneric(stream, .@"return", decl, inst_table),
- .@"fn" => return self.writeInstToStreamGeneric(stream, .@"fn", decl, inst_table),
- .@"export" => return self.writeInstToStreamGeneric(stream, .@"export", decl, inst_table),
- .ref => return self.writeInstToStreamGeneric(stream, .ref, decl, inst_table),
- .primitive => return self.writeInstToStreamGeneric(stream, .primitive, decl, inst_table),
- .fntype => return self.writeInstToStreamGeneric(stream, .fntype, decl, inst_table),
- .intcast => return self.writeInstToStreamGeneric(stream, .intcast, decl, inst_table),
- .bitcast => return self.writeInstToStreamGeneric(stream, .bitcast, decl, inst_table),
- .elemptr => return self.writeInstToStreamGeneric(stream, .elemptr, decl, inst_table),
- .add => return self.writeInstToStreamGeneric(stream, .add, decl, inst_table),
- .cmp => return self.writeInstToStreamGeneric(stream, .cmp, decl, inst_table),
- .condbr => return self.writeInstToStreamGeneric(stream, .condbr, decl, inst_table),
- .isnull => return self.writeInstToStreamGeneric(stream, .isnull, decl, inst_table),
- .isnonnull => return self.writeInstToStreamGeneric(stream, .isnonnull, decl, inst_table),
+ switch (inst.tag) {
+ .breakpoint => return self.writeInstToStreamGeneric(stream, .breakpoint, inst, inst_table),
+ .call => return self.writeInstToStreamGeneric(stream, .call, inst, inst_table),
+ .declref => return self.writeInstToStreamGeneric(stream, .declref, inst, inst_table),
+ .declref_str => return self.writeInstToStreamGeneric(stream, .declref_str, inst, inst_table),
+ .declval => return self.writeInstToStreamGeneric(stream, .declval, inst, inst_table),
+ .declval_in_module => return self.writeInstToStreamGeneric(stream, .declval_in_module, inst, inst_table),
+ .compileerror => return self.writeInstToStreamGeneric(stream, .compileerror, inst, inst_table),
+ .@"const" => return self.writeInstToStreamGeneric(stream, .@"const", inst, inst_table),
+ .str => return self.writeInstToStreamGeneric(stream, .str, inst, inst_table),
+ .int => return self.writeInstToStreamGeneric(stream, .int, inst, inst_table),
+ .ptrtoint => return self.writeInstToStreamGeneric(stream, .ptrtoint, inst, inst_table),
+ .fieldptr => return self.writeInstToStreamGeneric(stream, .fieldptr, inst, inst_table),
+ .deref => return self.writeInstToStreamGeneric(stream, .deref, inst, inst_table),
+ .as => return self.writeInstToStreamGeneric(stream, .as, inst, inst_table),
+ .@"asm" => return self.writeInstToStreamGeneric(stream, .@"asm", inst, inst_table),
+ .@"unreachable" => return self.writeInstToStreamGeneric(stream, .@"unreachable", inst, inst_table),
+ .@"return" => return self.writeInstToStreamGeneric(stream, .@"return", inst, inst_table),
+ .returnvoid => return self.writeInstToStreamGeneric(stream, .returnvoid, inst, inst_table),
+ .@"fn" => return self.writeInstToStreamGeneric(stream, .@"fn", inst, inst_table),
+ .@"export" => return self.writeInstToStreamGeneric(stream, .@"export", inst, inst_table),
+ .primitive => return self.writeInstToStreamGeneric(stream, .primitive, inst, inst_table),
+ .fntype => return self.writeInstToStreamGeneric(stream, .fntype, inst, inst_table),
+ .intcast => return self.writeInstToStreamGeneric(stream, .intcast, inst, inst_table),
+ .bitcast => return self.writeInstToStreamGeneric(stream, .bitcast, inst, inst_table),
+ .elemptr => return self.writeInstToStreamGeneric(stream, .elemptr, inst, inst_table),
+ .add => return self.writeInstToStreamGeneric(stream, .add, inst, inst_table),
+ .cmp => return self.writeInstToStreamGeneric(stream, .cmp, inst, inst_table),
+ .condbr => return self.writeInstToStreamGeneric(stream, .condbr, inst, inst_table),
+ .isnull => return self.writeInstToStreamGeneric(stream, .isnull, inst, inst_table),
+ .isnonnull => return self.writeInstToStreamGeneric(stream, .isnonnull, inst, inst_table),
}
}
@@ -619,6 +692,8 @@ pub const Module = struct {
bool => return stream.writeByte("01"[@boolToInt(param)]),
[]u8, []const u8 => return std.zig.renderStringLiteral(param, stream),
BigIntConst => return stream.print("{}", .{param}),
+ TypedValue => unreachable, // this is a special case
+ *IrModule.Decl => unreachable, // this is a special case
else => |T| @compileError("unimplemented: rendering parameter of type " ++ @typeName(T)),
}
}
@@ -628,13 +703,16 @@ pub const Module = struct {
if (info.index) |i| {
try stream.print("%{}", .{info.index});
} else {
- try stream.print("@{}", .{info.inst.name});
+ try stream.print("@{}", .{info.name});
}
} else if (inst.cast(Inst.DeclVal)) |decl_val| {
try stream.print("@{}", .{decl_val.positionals.name});
+ } else if (inst.cast(Inst.DeclValInModule)) |decl_val| {
+ try stream.print("@{}", .{decl_val.positionals.decl.name});
} else {
- //try stream.print("?", .{});
- unreachable;
+ // This should be unreachable in theory, but since ZIR is used for debugging the compiler
+ // we output some debug text instead.
+ try stream.print("?{}?", .{@tagName(inst.tag)});
}
}
};
@@ -673,7 +751,7 @@ const Parser = struct {
arena: std.heap.ArenaAllocator,
i: usize,
source: [:0]const u8,
- decls: std.ArrayListUnmanaged(*Inst),
+ decls: std.ArrayListUnmanaged(*Decl),
global_name_map: *std.StringHashMap(usize),
error_msg: ?ErrorMsg = null,
unnamed_index: usize,
@@ -702,12 +780,12 @@ const Parser = struct {
skipSpace(self);
try requireEatBytes(self, "=");
skipSpace(self);
- const inst = try parseInstruction(self, &body_context, ident);
+ const decl = try parseInstruction(self, &body_context, ident);
const ident_index = body_context.instructions.items.len;
if (try body_context.name_map.put(ident, ident_index)) |_| {
return self.fail("redefinition of identifier '{}'", .{ident});
}
- try body_context.instructions.append(inst);
+ try body_context.instructions.append(decl.inst);
continue;
},
' ', '\n' => continue,
@@ -857,7 +935,7 @@ const Parser = struct {
return error.ParseFailure;
}
- fn parseInstruction(self: *Parser, body_ctx: ?*Body, name: []const u8) InnerError!*Inst {
+ fn parseInstruction(self: *Parser, body_ctx: ?*Body, name: []const u8) InnerError!*Decl {
const contents_start = self.i;
const fn_name = try skipToAndOver(self, '(');
inline for (@typeInfo(Inst.Tag).Enum.fields) |field| {
@@ -876,10 +954,9 @@ const Parser = struct {
body_ctx: ?*Body,
inst_name: []const u8,
contents_start: usize,
- ) InnerError!*Inst {
+ ) InnerError!*Decl {
const inst_specific = try self.arena.allocator.create(InstType);
inst_specific.base = .{
- .name = inst_name,
.src = self.i,
.tag = InstType.base_tag,
};
@@ -929,10 +1006,15 @@ const Parser = struct {
}
try requireEatBytes(self, ")");
- inst_specific.base.contents = self.source[contents_start..self.i];
+ const decl = try self.arena.allocator.create(Decl);
+ decl.* = .{
+ .name = inst_name,
+ .contents_hash = std.zig.hashSrc(self.source[contents_start..self.i]),
+ .inst = &inst_specific.base,
+ };
//std.debug.warn("parsed {} = '{}'\n", .{ inst_specific.base.name, inst_specific.base.contents });
- return &inst_specific.base;
+ return decl;
}
fn parseParameterGeneric(self: *Parser, comptime T: type, body_ctx: ?*Body) !T {
@@ -978,6 +1060,8 @@ const Parser = struct {
*Inst => return parseParameterInst(self, body_ctx),
[]u8, []const u8 => return self.parseStringLiteral(),
BigIntConst => return self.parseIntegerLiteral(),
+ TypedValue => return self.fail("'const' is a special instruction; not legal in ZIR text", .{}),
+ *IrModule.Decl => return self.fail("'declval_in_module' is a special instruction; not legal in ZIR text", .{}),
else => @compileError("Unimplemented: ir parseParameterGeneric for type " ++ @typeName(T)),
}
return self.fail("TODO parse parameter {}", .{@typeName(T)});
@@ -1014,7 +1098,6 @@ const Parser = struct {
const declval = try self.arena.allocator.create(Inst.DeclVal);
declval.* = .{
.base = .{
- .name = try self.generateName(),
.src = src,
.tag = Inst.DeclVal.base_tag,
},
@@ -1027,7 +1110,7 @@ const Parser = struct {
if (local_ref) {
return body_ctx.?.instructions.items[kv.value];
} else {
- return self.decls.items[kv.value];
+ return self.decls.items[kv.value].inst;
}
}
@@ -1046,7 +1129,7 @@ pub fn emit(allocator: *Allocator, old_module: IrModule) !Module {
.old_module = &old_module,
.next_auto_name = 0,
.names = std.StringHashMap(void).init(allocator),
- .primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Inst).init(allocator),
+ .primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Decl).init(allocator),
};
defer ctx.decls.deinit(allocator);
defer ctx.names.deinit();
@@ -1065,10 +1148,10 @@ const EmitZIR = struct {
allocator: *Allocator,
arena: std.heap.ArenaAllocator,
old_module: *const IrModule,
- decls: std.ArrayListUnmanaged(*Inst),
+ decls: std.ArrayListUnmanaged(*Decl),
names: std.StringHashMap(void),
next_auto_name: usize,
- primitive_table: std.AutoHashMap(Inst.Primitive.Builtin, *Inst),
+ primitive_table: std.AutoHashMap(Inst.Primitive.Builtin, *Decl),
fn emit(self: *EmitZIR) !void {
// Put all the Decls in a list and sort them by name to avoid nondeterminism introduced
@@ -1087,52 +1170,90 @@ const EmitZIR = struct {
}
std.sort.sort(*IrModule.Decl, src_decls.items, {}, (struct {
fn lessThan(context: void, a: *IrModule.Decl, b: *IrModule.Decl) bool {
- return a.src < b.src;
+ return a.src_index < b.src_index;
}
}).lessThan);
// Emit all the decls.
for (src_decls.items) |ir_decl| {
+ switch (ir_decl.analysis) {
+ .unreferenced => continue,
+ .complete => {},
+ .in_progress => unreachable,
+ .outdated => unreachable,
+
+ .sema_failure,
+ .sema_failure_retryable,
+ .codegen_failure,
+ .dependency_failure,
+ .codegen_failure_retryable,
+ => if (self.old_module.failed_decls.getValue(ir_decl)) |err_msg| {
+ const fail_inst = try self.arena.allocator.create(Inst.CompileError);
+ fail_inst.* = .{
+ .base = .{
+ .src = ir_decl.src(),
+ .tag = Inst.CompileError.base_tag,
+ },
+ .positionals = .{
+ .msg = try self.arena.allocator.dupe(u8, err_msg.msg),
+ },
+ .kw_args = .{},
+ };
+ const decl = try self.arena.allocator.create(Decl);
+ decl.* = .{
+ .name = mem.spanZ(ir_decl.name),
+ .contents_hash = undefined,
+ .inst = &fail_inst.base,
+ };
+ try self.decls.append(self.allocator, decl);
+ continue;
+ },
+ }
if (self.old_module.export_owners.getValue(ir_decl)) |exports| {
for (exports) |module_export| {
- const declval = try self.emitDeclVal(ir_decl.src, mem.spanZ(module_export.exported_decl.name));
const symbol_name = try self.emitStringLiteral(module_export.src, module_export.options.name);
const export_inst = try self.arena.allocator.create(Inst.Export);
export_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = module_export.src,
.tag = Inst.Export.base_tag,
},
.positionals = .{
- .symbol_name = symbol_name,
- .value = declval,
+ .symbol_name = symbol_name.inst,
+ .decl_name = mem.spanZ(module_export.exported_decl.name),
},
.kw_args = .{},
};
- try self.decls.append(self.allocator, &export_inst.base);
+ _ = try self.emitUnnamedDecl(&export_inst.base);
}
} else {
- const new_decl = try self.emitTypedValue(ir_decl.src, ir_decl.typed_value.most_recent.typed_value);
+ const new_decl = try self.emitTypedValue(ir_decl.src(), ir_decl.typed_value.most_recent.typed_value);
new_decl.name = try self.arena.allocator.dupe(u8, mem.spanZ(ir_decl.name));
}
}
}
- fn resolveInst(self: *EmitZIR, inst_table: *std.AutoHashMap(*ir.Inst, *Inst), inst: *ir.Inst) !*Inst {
+ const ZirBody = struct {
+ inst_table: *std.AutoHashMap(*ir.Inst, *Inst),
+ instructions: *std.ArrayList(*Inst),
+ };
+
+ fn resolveInst(self: *EmitZIR, new_body: ZirBody, inst: *ir.Inst) !*Inst {
if (inst.cast(ir.Inst.Constant)) |const_inst| {
- const new_decl = if (const_inst.val.cast(Value.Payload.Function)) |func_pl| blk: {
+ const new_inst = if (const_inst.val.cast(Value.Payload.Function)) |func_pl| blk: {
const owner_decl = func_pl.func.owner_decl;
break :blk try self.emitDeclVal(inst.src, mem.spanZ(owner_decl.name));
} else if (const_inst.val.cast(Value.Payload.DeclRef)) |declref| blk: {
- break :blk try self.emitDeclRef(inst.src, declref.decl);
+ const decl_ref = try self.emitDeclRef(inst.src, declref.decl);
+ try new_body.instructions.append(decl_ref);
+ break :blk decl_ref;
} else blk: {
- break :blk try self.emitTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val });
+ break :blk (try self.emitTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val })).inst;
};
- try inst_table.putNoClobber(inst, new_decl);
- return new_decl;
+ try new_body.inst_table.putNoClobber(inst, new_inst);
+ return new_inst;
} else {
- return inst_table.getValue(inst).?;
+ return new_body.inst_table.getValue(inst).?;
}
}
@@ -1140,7 +1261,6 @@ const EmitZIR = struct {
const declval = try self.arena.allocator.create(Inst.DeclVal);
declval.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.DeclVal.base_tag,
},
@@ -1150,12 +1270,11 @@ const EmitZIR = struct {
return &declval.base;
}
- fn emitComptimeIntVal(self: *EmitZIR, src: usize, val: Value) !*Inst {
+ fn emitComptimeIntVal(self: *EmitZIR, src: usize, val: Value) !*Decl {
const big_int_space = try self.arena.allocator.create(Value.BigIntSpace);
const int_inst = try self.arena.allocator.create(Inst.Int);
int_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.Int.base_tag,
},
@@ -1164,34 +1283,29 @@ const EmitZIR = struct {
},
.kw_args = .{},
};
- try self.decls.append(self.allocator, &int_inst.base);
- return &int_inst.base;
+ return self.emitUnnamedDecl(&int_inst.base);
}
- fn emitDeclRef(self: *EmitZIR, src: usize, decl: *IrModule.Decl) !*Inst {
- const declval = try self.emitDeclVal(src, mem.spanZ(decl.name));
- const ref_inst = try self.arena.allocator.create(Inst.Ref);
- ref_inst.* = .{
+ fn emitDeclRef(self: *EmitZIR, src: usize, module_decl: *IrModule.Decl) !*Inst {
+ const declref_inst = try self.arena.allocator.create(Inst.DeclRef);
+ declref_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
- .tag = Inst.Ref.base_tag,
+ .tag = Inst.DeclRef.base_tag,
},
.positionals = .{
- .operand = declval,
+ .name = mem.spanZ(module_decl.name),
},
.kw_args = .{},
};
- try self.decls.append(self.allocator, &ref_inst.base);
-
- return &ref_inst.base;
+ return &declref_inst.base;
}
- fn emitTypedValue(self: *EmitZIR, src: usize, typed_value: TypedValue) Allocator.Error!*Inst {
+ fn emitTypedValue(self: *EmitZIR, src: usize, typed_value: TypedValue) Allocator.Error!*Decl {
const allocator = &self.arena.allocator;
if (typed_value.val.cast(Value.Payload.DeclRef)) |decl_ref| {
const decl = decl_ref.decl;
- return self.emitDeclRef(src, decl);
+ return try self.emitUnnamedDecl(try self.emitDeclRef(src, decl));
}
switch (typed_value.ty.zigTypeTag()) {
.Pointer => {
@@ -1218,18 +1332,16 @@ const EmitZIR = struct {
const as_inst = try self.arena.allocator.create(Inst.As);
as_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.As.base_tag,
},
.positionals = .{
- .dest_type = try self.emitType(src, typed_value.ty),
- .value = try self.emitComptimeIntVal(src, typed_value.val),
+ .dest_type = (try self.emitType(src, typed_value.ty)).inst,
+ .value = (try self.emitComptimeIntVal(src, typed_value.val)).inst,
},
.kw_args = .{},
};
-
- return &as_inst.base;
+ return self.emitUnnamedDecl(&as_inst.base);
},
.Type => {
const ty = typed_value.val.toType();
@@ -1255,7 +1367,6 @@ const EmitZIR = struct {
const fail_inst = try self.arena.allocator.create(Inst.CompileError);
fail_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.CompileError.base_tag,
},
@@ -1270,7 +1381,6 @@ const EmitZIR = struct {
const fail_inst = try self.arena.allocator.create(Inst.CompileError);
fail_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.CompileError.base_tag,
},
@@ -1283,7 +1393,7 @@ const EmitZIR = struct {
},
}
- const fn_type = try self.emitType(src, module_fn.fn_type);
+ const fn_type = try self.emitType(src, typed_value.ty);
const arena_instrs = try self.arena.allocator.alloc(*Inst, instructions.items.len);
mem.copy(*Inst, arena_instrs, instructions.items);
@@ -1291,18 +1401,16 @@ const EmitZIR = struct {
const fn_inst = try self.arena.allocator.create(Inst.Fn);
fn_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.Fn.base_tag,
},
.positionals = .{
- .fn_type = fn_type,
+ .fn_type = fn_type.inst,
.body = .{ .instructions = arena_instrs },
},
.kw_args = .{},
};
- try self.decls.append(self.allocator, &fn_inst.base);
- return &fn_inst.base;
+ return self.emitUnnamedDecl(&fn_inst.base);
},
.Array => {
// TODO more checks to make sure this can be emitted as a string literal
@@ -1318,7 +1426,6 @@ const EmitZIR = struct {
const str_inst = try self.arena.allocator.create(Inst.Str);
str_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.Str.base_tag,
},
@@ -1327,8 +1434,7 @@ const EmitZIR = struct {
},
.kw_args = .{},
};
- try self.decls.append(self.allocator, &str_inst.base);
- return &str_inst.base;
+ return self.emitUnnamedDecl(&str_inst.base);
},
.Void => return self.emitPrimitive(src, .void_value),
else => |t| std.debug.panic("TODO implement emitTypedValue for {}", .{@tagName(t)}),
@@ -1339,7 +1445,6 @@ const EmitZIR = struct {
const new_inst = try self.arena.allocator.create(T);
new_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = T.base_tag,
},
@@ -1355,6 +1460,10 @@ const EmitZIR = struct {
inst_table: *std.AutoHashMap(*ir.Inst, *Inst),
instructions: *std.ArrayList(*Inst),
) Allocator.Error!void {
+ const new_body = ZirBody{
+ .inst_table = inst_table,
+ .instructions = instructions,
+ };
for (body.instructions) |inst| {
const new_inst = switch (inst.tag) {
.breakpoint => try self.emitTrivial(inst.src, Inst.Breakpoint),
@@ -1364,16 +1473,15 @@ const EmitZIR = struct {
const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len);
for (args) |*elem, i| {
- elem.* = try self.resolveInst(inst_table, old_inst.args.args[i]);
+ elem.* = try self.resolveInst(new_body, old_inst.args.args[i]);
}
new_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = inst.src,
.tag = Inst.Call.base_tag,
},
.positionals = .{
- .func = try self.resolveInst(inst_table, old_inst.args.func),
+ .func = try self.resolveInst(new_body, old_inst.args.func),
.args = args,
},
.kw_args = .{},
@@ -1381,7 +1489,22 @@ const EmitZIR = struct {
break :blk &new_inst.base;
},
.unreach => try self.emitTrivial(inst.src, Inst.Unreachable),
- .ret => try self.emitTrivial(inst.src, Inst.Return),
+ .ret => blk: {
+ const old_inst = inst.cast(ir.Inst.Ret).?;
+ const new_inst = try self.arena.allocator.create(Inst.Return);
+ new_inst.* = .{
+ .base = .{
+ .src = inst.src,
+ .tag = Inst.Return.base_tag,
+ },
+ .positionals = .{
+ .operand = try self.resolveInst(new_body, old_inst.args.operand),
+ },
+ .kw_args = .{},
+ };
+ break :blk &new_inst.base;
+ },
+ .retvoid => try self.emitTrivial(inst.src, Inst.ReturnVoid),
.constant => unreachable, // excluded from function bodies
.assembly => blk: {
const old_inst = inst.cast(ir.Inst.Assembly).?;
@@ -1389,33 +1512,32 @@ const EmitZIR = struct {
const inputs = try self.arena.allocator.alloc(*Inst, old_inst.args.inputs.len);
for (inputs) |*elem, i| {
- elem.* = try self.emitStringLiteral(inst.src, old_inst.args.inputs[i]);
+ elem.* = (try self.emitStringLiteral(inst.src, old_inst.args.inputs[i])).inst;
}
const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.args.clobbers.len);
for (clobbers) |*elem, i| {
- elem.* = try self.emitStringLiteral(inst.src, old_inst.args.clobbers[i]);
+ elem.* = (try self.emitStringLiteral(inst.src, old_inst.args.clobbers[i])).inst;
}
const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len);
for (args) |*elem, i| {
- elem.* = try self.resolveInst(inst_table, old_inst.args.args[i]);
+ elem.* = try self.resolveInst(new_body, old_inst.args.args[i]);
}
new_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = inst.src,
.tag = Inst.Asm.base_tag,
},
.positionals = .{
- .asm_source = try self.emitStringLiteral(inst.src, old_inst.args.asm_source),
- .return_type = try self.emitType(inst.src, inst.ty),
+ .asm_source = (try self.emitStringLiteral(inst.src, old_inst.args.asm_source)).inst,
+ .return_type = (try self.emitType(inst.src, inst.ty)).inst,
},
.kw_args = .{
.@"volatile" = old_inst.args.is_volatile,
.output = if (old_inst.args.output) |o|
- try self.emitStringLiteral(inst.src, o)
+ (try self.emitStringLiteral(inst.src, o)).inst
else
null,
.inputs = inputs,
@@ -1430,12 +1552,11 @@ const EmitZIR = struct {
const new_inst = try self.arena.allocator.create(Inst.PtrToInt);
new_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = inst.src,
.tag = Inst.PtrToInt.base_tag,
},
.positionals = .{
- .ptr = try self.resolveInst(inst_table, old_inst.args.ptr),
+ .ptr = try self.resolveInst(new_body, old_inst.args.ptr),
},
.kw_args = .{},
};
@@ -1446,13 +1567,12 @@ const EmitZIR = struct {
const new_inst = try self.arena.allocator.create(Inst.BitCast);
new_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = inst.src,
.tag = Inst.BitCast.base_tag,
},
.positionals = .{
- .dest_type = try self.emitType(inst.src, inst.ty),
- .operand = try self.resolveInst(inst_table, old_inst.args.operand),
+ .dest_type = (try self.emitType(inst.src, inst.ty)).inst,
+ .operand = try self.resolveInst(new_body, old_inst.args.operand),
},
.kw_args = .{},
};
@@ -1463,13 +1583,12 @@ const EmitZIR = struct {
const new_inst = try self.arena.allocator.create(Inst.Cmp);
new_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = inst.src,
.tag = Inst.Cmp.base_tag,
},
.positionals = .{
- .lhs = try self.resolveInst(inst_table, old_inst.args.lhs),
- .rhs = try self.resolveInst(inst_table, old_inst.args.rhs),
+ .lhs = try self.resolveInst(new_body, old_inst.args.lhs),
+ .rhs = try self.resolveInst(new_body, old_inst.args.rhs),
.op = old_inst.args.op,
},
.kw_args = .{},
@@ -1491,12 +1610,11 @@ const EmitZIR = struct {
const new_inst = try self.arena.allocator.create(Inst.CondBr);
new_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = inst.src,
.tag = Inst.CondBr.base_tag,
},
.positionals = .{
- .condition = try self.resolveInst(inst_table, old_inst.args.condition),
+ .condition = try self.resolveInst(new_body, old_inst.args.condition),
.true_body = .{ .instructions = true_body.toOwnedSlice() },
.false_body = .{ .instructions = false_body.toOwnedSlice() },
},
@@ -1509,12 +1627,11 @@ const EmitZIR = struct {
const new_inst = try self.arena.allocator.create(Inst.IsNull);
new_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = inst.src,
.tag = Inst.IsNull.base_tag,
},
.positionals = .{
- .operand = try self.resolveInst(inst_table, old_inst.args.operand),
+ .operand = try self.resolveInst(new_body, old_inst.args.operand),
},
.kw_args = .{},
};
@@ -1525,12 +1642,11 @@ const EmitZIR = struct {
const new_inst = try self.arena.allocator.create(Inst.IsNonNull);
new_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = inst.src,
.tag = Inst.IsNonNull.base_tag,
},
.positionals = .{
- .operand = try self.resolveInst(inst_table, old_inst.args.operand),
+ .operand = try self.resolveInst(new_body, old_inst.args.operand),
},
.kw_args = .{},
};
@@ -1542,7 +1658,7 @@ const EmitZIR = struct {
}
}
- fn emitType(self: *EmitZIR, src: usize, ty: Type) Allocator.Error!*Inst {
+ fn emitType(self: *EmitZIR, src: usize, ty: Type) Allocator.Error!*Decl {
switch (ty.tag()) {
.isize => return self.emitPrimitive(src, .isize),
.usize => return self.emitPrimitive(src, .usize),
@@ -1575,26 +1691,24 @@ const EmitZIR = struct {
ty.fnParamTypes(param_types);
const emitted_params = try self.arena.allocator.alloc(*Inst, param_types.len);
for (param_types) |param_type, i| {
- emitted_params[i] = try self.emitType(src, param_type);
+ emitted_params[i] = (try self.emitType(src, param_type)).inst;
}
const fntype_inst = try self.arena.allocator.create(Inst.FnType);
fntype_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.FnType.base_tag,
},
.positionals = .{
.param_types = emitted_params,
- .return_type = try self.emitType(src, ty.fnReturnType()),
+ .return_type = (try self.emitType(src, ty.fnReturnType())).inst,
},
.kw_args = .{
.cc = ty.fnCallingConvention(),
},
};
- try self.decls.append(self.allocator, &fntype_inst.base);
- return &fntype_inst.base;
+ return self.emitUnnamedDecl(&fntype_inst.base);
},
else => std.debug.panic("TODO implement emitType for {}", .{ty}),
},
@@ -1613,13 +1727,12 @@ const EmitZIR = struct {
}
}
- fn emitPrimitive(self: *EmitZIR, src: usize, tag: Inst.Primitive.Builtin) !*Inst {
+ fn emitPrimitive(self: *EmitZIR, src: usize, tag: Inst.Primitive.Builtin) !*Decl {
const gop = try self.primitive_table.getOrPut(tag);
if (!gop.found_existing) {
const primitive_inst = try self.arena.allocator.create(Inst.Primitive);
primitive_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.Primitive.base_tag,
},
@@ -1628,17 +1741,15 @@ const EmitZIR = struct {
},
.kw_args = .{},
};
- try self.decls.append(self.allocator, &primitive_inst.base);
- gop.kv.value = &primitive_inst.base;
+ gop.kv.value = try self.emitUnnamedDecl(&primitive_inst.base);
}
return gop.kv.value;
}
- fn emitStringLiteral(self: *EmitZIR, src: usize, str: []const u8) !*Inst {
+ fn emitStringLiteral(self: *EmitZIR, src: usize, str: []const u8) !*Decl {
const str_inst = try self.arena.allocator.create(Inst.Str);
str_inst.* = .{
.base = .{
- .name = try self.autoName(),
.src = src,
.tag = Inst.Str.base_tag,
},
@@ -1647,22 +1758,17 @@ const EmitZIR = struct {
},
.kw_args = .{},
};
- try self.decls.append(self.allocator, &str_inst.base);
+ return self.emitUnnamedDecl(&str_inst.base);
+ }
- const ref_inst = try self.arena.allocator.create(Inst.Ref);
- ref_inst.* = .{
- .base = .{
- .name = try self.autoName(),
- .src = src,
- .tag = Inst.Ref.base_tag,
- },
- .positionals = .{
- .operand = &str_inst.base,
- },
- .kw_args = .{},
+ fn emitUnnamedDecl(self: *EmitZIR, inst: *Inst) !*Decl {
+ const decl = try self.arena.allocator.create(Decl);
+ decl.* = .{
+ .name = try self.autoName(),
+ .contents_hash = undefined,
+ .inst = inst,
};
- try self.decls.append(self.allocator, &ref_inst.base);
-
- return &ref_inst.base;
+ try self.decls.append(self.allocator, decl);
+ return decl;
}
};
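
Because the text module now stores Decl entries (name, contents hash, instruction) instead of bare instructions, lookups return both the Decl and its index. A caller sketch, assuming `module` is an already-parsed zir.Module and "main" is a placeholder decl name:

    const std = @import("std");
    const zir = @import("zir.zig");

    fn reportDecl(module: zir.Module) void {
        if (module.findDecl("main")) |found| {
            std.debug.warn("decl '{}' is decl #{} and wraps a '{}' instruction\n", .{
                found.decl.name,
                found.index,
                @tagName(found.decl.inst.tag),
            });
        } else {
            std.debug.warn("no decl named 'main'\n", .{});
        }
    }
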