author     Andrew Kelley <andrew@ziglang.org>  2021-04-13 12:34:27 -0700
committer  Andrew Kelley <andrew@ziglang.org>  2021-04-15 19:06:39 -0700
commit     0170a242bb99e96fcb127e26e1b2fcbe5a19c4ee (patch)
tree       ab5dd10064527ff8fcd7d3f676466de9c30d1c2b
parent     798ad631f3f9836de663bc6c728b415e0a13528f (diff)
stage2: move zir.Code to become root level fields of zir.zig
next commit will do the rename
-rw-r--r--  src/AstGen.zig    6
-rw-r--r--  src/Module.zig  206
-rw-r--r--  src/Sema.zig    474
-rw-r--r--  src/zir.zig     177
4 files changed, 431 insertions, 432 deletions
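The change is mechanical: previously zir.zig was imported as a namespace and the instruction list lived in a nested zir.Code struct; this commit moves zir.Code's fields to the root of zir.zig, so the file itself becomes the struct and call sites import it under the type name Zir (the file rename to Zir.zig comes in the next commit). A minimal sketch of the "file is a struct" idiom, with illustrative field names rather than the compiler's actual layout:

const std = @import("std");

// In Zig, a file is itself a struct: declaring fields at the top level of
// zir.zig makes `@import("zir.zig")` evaluate to a struct type, so call sites
// can write `const Zir = @import("zir.zig");` and use `Zir` directly where
// they previously used `zir.Code`. Sketch with hypothetical fields:
const Zir = struct {
    instructions: []const u32 = &[_]u32{},
    string_bytes: []const u8 = &[_]u8{},
    extra: []const u32 = &[_]u32{},

    pub const Inst = struct {
        pub const Index = u32;
        pub const Ref = enum(u32) { none, _ };
    };
};

pub fn main() void {
    const code: Zir = .{};
    std.debug.print("{d} instructions, {d} extra\n", .{ code.instructions.len, code.extra.len });
}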
diff --git a/src/AstGen.zig b/src/AstGen.zig
index f5a5b22645..8bd71b8228 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -1,7 +1,7 @@
-//! A Work-In-Progress `zir.Code`. This is a shared parent of all
-//! `GenZir` scopes. Once the `zir.Code` is produced, this struct
+//! A Work-In-Progress `Zir`. This is a shared parent of all
+//! `GenZir` scopes. Once the `Zir` is produced, this struct
//! is deinitialized.
-//! The `GenZir.finish` function converts this to a `zir.Code`.
+//! The `GenZir.finish` function converts this to a `Zir`.
const AstGen = @This();
diff --git a/src/Module.zig b/src/Module.zig
index 1af4fef844..c57566ddb0 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -21,7 +21,7 @@ const TypedValue = @import("TypedValue.zig");
const Package = @import("Package.zig");
const link = @import("link.zig");
const ir = @import("ir.zig");
-const zir = @import("zir.zig");
+const Zir = @import("zir.zig"); // TODO rename this to Zir
const trace = @import("tracy.zig").trace;
const AstGen = @import("AstGen.zig");
const Sema = @import("Sema.zig");
@@ -464,7 +464,7 @@ pub const Fn = struct {
/// The first N elements of `extra` are indexes into `string_bytes` to
/// a null-terminated string.
/// This memory is managed with gpa, must be freed when the function is freed.
- zir: zir.Code,
+ zir: Zir,
/// undefined unless analysis state is `success`.
body: ir.Body,
state: Analysis,
@@ -808,7 +808,7 @@ pub const Scope = struct {
/// This `Block` maps a block ZIR instruction to the corresponding
/// TZIR instruction for break instruction analysis.
pub const Label = struct {
- zir_block: zir.Inst.Index,
+ zir_block: Zir.Inst.Index,
merges: Merges,
};
@@ -834,7 +834,7 @@ pub const Scope = struct {
/// For debugging purposes.
pub fn dump(block: *Block, mod: Module) void {
- zir.dumpBlock(mod, block);
+ Zir.dumpBlock(mod, block);
}
pub fn makeSubBlock(parent: *Block) Block {
@@ -1045,7 +1045,7 @@ pub const Scope = struct {
};
/// This is a temporary structure; references to it are valid only
- /// while constructing a `zir.Code`.
+ /// while constructing a `Zir`.
pub const GenZir = struct {
pub const base_tag: Tag = .gen_zir;
base: Scope = Scope{ .tag = base_tag },
@@ -1056,16 +1056,16 @@ pub const Scope = struct {
astgen: *AstGen,
/// Keeps track of the list of instructions in this scope only. Indexes
/// to instructions in `astgen`.
- instructions: ArrayListUnmanaged(zir.Inst.Index) = .{},
+ instructions: ArrayListUnmanaged(Zir.Inst.Index) = .{},
label: ?Label = null,
- break_block: zir.Inst.Index = 0,
- continue_block: zir.Inst.Index = 0,
+ break_block: Zir.Inst.Index = 0,
+ continue_block: Zir.Inst.Index = 0,
/// Only valid when setBreakResultLoc is called.
break_result_loc: AstGen.ResultLoc = undefined,
/// When a block has a pointer result location, here it is.
- rl_ptr: zir.Inst.Ref = .none,
+ rl_ptr: Zir.Inst.Ref = .none,
/// When a block has a type result location, here it is.
- rl_ty_inst: zir.Inst.Ref = .none,
+ rl_ty_inst: Zir.Inst.Ref = .none,
/// Keeps track of how many branches of a block did not actually
/// consume the result location. astgen uses this to figure out
/// whether to rely on break instructions or writing to the result
@@ -1077,25 +1077,25 @@ pub const Scope = struct {
break_count: usize = 0,
/// Tracks `break :foo bar` instructions so they can possibly be elided later if
/// the labeled block ends up not needing a result location pointer.
- labeled_breaks: ArrayListUnmanaged(zir.Inst.Index) = .{},
+ labeled_breaks: ArrayListUnmanaged(Zir.Inst.Index) = .{},
/// Tracks `store_to_block_ptr` instructions that correspond to break instructions
/// so they can possibly be elided later if the labeled block ends up not needing
/// a result location pointer.
- labeled_store_to_block_ptr_list: ArrayListUnmanaged(zir.Inst.Index) = .{},
+ labeled_store_to_block_ptr_list: ArrayListUnmanaged(Zir.Inst.Index) = .{},
pub const Label = struct {
token: ast.TokenIndex,
- block_inst: zir.Inst.Index,
+ block_inst: Zir.Inst.Index,
used: bool = false,
};
/// Only valid to call on the top of the `GenZir` stack. Completes the
- /// `AstGen` into a `zir.Code`. Leaves the `AstGen` in an
+ /// `AstGen` into a `Zir`. Leaves the `AstGen` in an
/// initialized, but empty, state.
- pub fn finish(gz: *GenZir) !zir.Code {
+ pub fn finish(gz: *GenZir) !Zir {
const gpa = gz.astgen.mod.gpa;
try gz.setBlockBody(0);
- return zir.Code{
+ return Zir{
.instructions = gz.astgen.instructions.toOwnedSlice(),
.string_bytes = gz.astgen.string_bytes.toOwnedSlice(gpa),
.extra = gz.astgen.extra.toOwnedSlice(gpa),
@@ -1148,24 +1148,24 @@ pub const Scope = struct {
}
}
- pub fn setBoolBrBody(gz: GenZir, inst: zir.Inst.Index) !void {
+ pub fn setBoolBrBody(gz: GenZir, inst: Zir.Inst.Index) !void {
const gpa = gz.astgen.mod.gpa;
try gz.astgen.extra.ensureCapacity(gpa, gz.astgen.extra.items.len +
- @typeInfo(zir.Inst.Block).Struct.fields.len + gz.instructions.items.len);
+ @typeInfo(Zir.Inst.Block).Struct.fields.len + gz.instructions.items.len);
const zir_datas = gz.astgen.instructions.items(.data);
zir_datas[inst].bool_br.payload_index = gz.astgen.addExtraAssumeCapacity(
- zir.Inst.Block{ .body_len = @intCast(u32, gz.instructions.items.len) },
+ Zir.Inst.Block{ .body_len = @intCast(u32, gz.instructions.items.len) },
);
gz.astgen.extra.appendSliceAssumeCapacity(gz.instructions.items);
}
- pub fn setBlockBody(gz: GenZir, inst: zir.Inst.Index) !void {
+ pub fn setBlockBody(gz: GenZir, inst: Zir.Inst.Index) !void {
const gpa = gz.astgen.mod.gpa;
try gz.astgen.extra.ensureCapacity(gpa, gz.astgen.extra.items.len +
- @typeInfo(zir.Inst.Block).Struct.fields.len + gz.instructions.items.len);
+ @typeInfo(Zir.Inst.Block).Struct.fields.len + gz.instructions.items.len);
const zir_datas = gz.astgen.instructions.items(.data);
zir_datas[inst].pl_node.payload_index = gz.astgen.addExtraAssumeCapacity(
- zir.Inst.Block{ .body_len = @intCast(u32, gz.instructions.items.len) },
+ Zir.Inst.Block{ .body_len = @intCast(u32, gz.instructions.items.len) },
);
gz.astgen.extra.appendSliceAssumeCapacity(gz.instructions.items);
}
@@ -1180,12 +1180,12 @@ pub const Scope = struct {
return str_index;
}
- pub fn addFnTypeCc(gz: *GenZir, tag: zir.Inst.Tag, args: struct {
+ pub fn addFnTypeCc(gz: *GenZir, tag: Zir.Inst.Tag, args: struct {
src_node: ast.Node.Index,
- param_types: []const zir.Inst.Ref,
- ret_ty: zir.Inst.Ref,
- cc: zir.Inst.Ref,
- }) !zir.Inst.Ref {
+ param_types: []const Zir.Inst.Ref,
+ ret_ty: Zir.Inst.Ref,
+ cc: Zir.Inst.Ref,
+ }) !Zir.Inst.Ref {
assert(args.src_node != 0);
assert(args.ret_ty != .none);
assert(args.cc != .none);
@@ -1193,16 +1193,16 @@ pub const Scope = struct {
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.astgen.instructions.ensureCapacity(gpa, gz.astgen.instructions.len + 1);
try gz.astgen.extra.ensureCapacity(gpa, gz.astgen.extra.items.len +
- @typeInfo(zir.Inst.FnTypeCc).Struct.fields.len + args.param_types.len);
+ @typeInfo(Zir.Inst.FnTypeCc).Struct.fields.len + args.param_types.len);
- const payload_index = gz.astgen.addExtraAssumeCapacity(zir.Inst.FnTypeCc{
+ const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.FnTypeCc{
.return_type = args.ret_ty,
.cc = args.cc,
.param_types_len = @intCast(u32, args.param_types.len),
});
gz.astgen.appendRefsAssumeCapacity(args.param_types);
- const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.data = .{ .pl_node = .{
@@ -1214,26 +1214,26 @@ pub const Scope = struct {
return gz.astgen.indexToRef(new_index);
}
- pub fn addFnType(gz: *GenZir, tag: zir.Inst.Tag, args: struct {
+ pub fn addFnType(gz: *GenZir, tag: Zir.Inst.Tag, args: struct {
src_node: ast.Node.Index,
- ret_ty: zir.Inst.Ref,
- param_types: []const zir.Inst.Ref,
- }) !zir.Inst.Ref {
+ ret_ty: Zir.Inst.Ref,
+ param_types: []const Zir.Inst.Ref,
+ }) !Zir.Inst.Ref {
assert(args.src_node != 0);
assert(args.ret_ty != .none);
const gpa = gz.astgen.mod.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.astgen.instructions.ensureCapacity(gpa, gz.astgen.instructions.len + 1);
try gz.astgen.extra.ensureCapacity(gpa, gz.astgen.extra.items.len +
- @typeInfo(zir.Inst.FnType).Struct.fields.len + args.param_types.len);
+ @typeInfo(Zir.Inst.FnType).Struct.fields.len + args.param_types.len);
- const payload_index = gz.astgen.addExtraAssumeCapacity(zir.Inst.FnType{
+ const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.FnType{
.return_type = args.ret_ty,
.param_types_len = @intCast(u32, args.param_types.len),
});
gz.astgen.appendRefsAssumeCapacity(args.param_types);
- const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.data = .{ .pl_node = .{
@@ -1247,27 +1247,27 @@ pub const Scope = struct {
pub fn addCall(
gz: *GenZir,
- tag: zir.Inst.Tag,
- callee: zir.Inst.Ref,
- args: []const zir.Inst.Ref,
+ tag: Zir.Inst.Tag,
+ callee: Zir.Inst.Ref,
+ args: []const Zir.Inst.Ref,
/// Absolute node index. This function does the conversion to offset from Decl.
src_node: ast.Node.Index,
- ) !zir.Inst.Ref {
+ ) !Zir.Inst.Ref {
assert(callee != .none);
assert(src_node != 0);
const gpa = gz.astgen.mod.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.astgen.instructions.ensureCapacity(gpa, gz.astgen.instructions.len + 1);
try gz.astgen.extra.ensureCapacity(gpa, gz.astgen.extra.items.len +
- @typeInfo(zir.Inst.Call).Struct.fields.len + args.len);
+ @typeInfo(Zir.Inst.Call).Struct.fields.len + args.len);
- const payload_index = gz.astgen.addExtraAssumeCapacity(zir.Inst.Call{
+ const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Call{
.callee = callee,
.args_len = @intCast(u32, args.len),
});
gz.astgen.appendRefsAssumeCapacity(args);
- const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.data = .{ .pl_node = .{
@@ -1279,19 +1279,19 @@ pub const Scope = struct {
return gz.astgen.indexToRef(new_index);
}
- /// Note that this returns a `zir.Inst.Index` not a ref.
+ /// Note that this returns a `Zir.Inst.Index` not a ref.
/// Leaves the `payload_index` field undefined.
pub fn addBoolBr(
gz: *GenZir,
- tag: zir.Inst.Tag,
- lhs: zir.Inst.Ref,
- ) !zir.Inst.Index {
+ tag: Zir.Inst.Tag,
+ lhs: Zir.Inst.Ref,
+ ) !Zir.Inst.Index {
assert(lhs != .none);
const gpa = gz.astgen.mod.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.astgen.instructions.ensureCapacity(gpa, gz.astgen.instructions.len + 1);
- const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.data = .{ .bool_br = .{
@@ -1303,14 +1303,14 @@ pub const Scope = struct {
return new_index;
}
- pub fn addInt(gz: *GenZir, integer: u64) !zir.Inst.Ref {
+ pub fn addInt(gz: *GenZir, integer: u64) !Zir.Inst.Ref {
return gz.add(.{
.tag = .int,
.data = .{ .int = integer },
});
}
- pub fn addFloat(gz: *GenZir, number: f32, src_node: ast.Node.Index) !zir.Inst.Ref {
+ pub fn addFloat(gz: *GenZir, number: f32, src_node: ast.Node.Index) !Zir.Inst.Ref {
return gz.add(.{
.tag = .float,
.data = .{ .float = .{
@@ -1322,11 +1322,11 @@ pub const Scope = struct {
pub fn addUnNode(
gz: *GenZir,
- tag: zir.Inst.Tag,
- operand: zir.Inst.Ref,
+ tag: Zir.Inst.Tag,
+ operand: Zir.Inst.Ref,
/// Absolute node index. This function does the conversion to offset from Decl.
src_node: ast.Node.Index,
- ) !zir.Inst.Ref {
+ ) !Zir.Inst.Ref {
assert(operand != .none);
return gz.add(.{
.tag = tag,
@@ -1339,17 +1339,17 @@ pub const Scope = struct {
pub fn addPlNode(
gz: *GenZir,
- tag: zir.Inst.Tag,
+ tag: Zir.Inst.Tag,
/// Absolute node index. This function does the conversion to offset from Decl.
src_node: ast.Node.Index,
extra: anytype,
- ) !zir.Inst.Ref {
+ ) !Zir.Inst.Ref {
const gpa = gz.astgen.mod.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.astgen.instructions.ensureCapacity(gpa, gz.astgen.instructions.len + 1);
const payload_index = try gz.astgen.addExtra(extra);
- const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.data = .{ .pl_node = .{
@@ -1363,19 +1363,19 @@ pub const Scope = struct {
pub fn addArrayTypeSentinel(
gz: *GenZir,
- len: zir.Inst.Ref,
- sentinel: zir.Inst.Ref,
- elem_type: zir.Inst.Ref,
- ) !zir.Inst.Ref {
+ len: Zir.Inst.Ref,
+ sentinel: Zir.Inst.Ref,
+ elem_type: Zir.Inst.Ref,
+ ) !Zir.Inst.Ref {
const gpa = gz.astgen.mod.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.astgen.instructions.ensureCapacity(gpa, gz.astgen.instructions.len + 1);
- const payload_index = try gz.astgen.addExtra(zir.Inst.ArrayTypeSentinel{
+ const payload_index = try gz.astgen.addExtra(Zir.Inst.ArrayTypeSentinel{
.sentinel = sentinel,
.elem_type = elem_type,
});
- const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(.{
.tag = .array_type_sentinel,
.data = .{ .array_type_sentinel = .{
@@ -1389,11 +1389,11 @@ pub const Scope = struct {
pub fn addUnTok(
gz: *GenZir,
- tag: zir.Inst.Tag,
- operand: zir.Inst.Ref,
+ tag: Zir.Inst.Tag,
+ operand: Zir.Inst.Ref,
/// Absolute token index. This function does the conversion to Decl offset.
abs_tok_index: ast.TokenIndex,
- ) !zir.Inst.Ref {
+ ) !Zir.Inst.Ref {
assert(operand != .none);
return gz.add(.{
.tag = tag,
@@ -1406,11 +1406,11 @@ pub const Scope = struct {
pub fn addStrTok(
gz: *GenZir,
- tag: zir.Inst.Tag,
+ tag: Zir.Inst.Tag,
str_index: u32,
/// Absolute token index. This function does the conversion to Decl offset.
abs_tok_index: ast.TokenIndex,
- ) !zir.Inst.Ref {
+ ) !Zir.Inst.Ref {
return gz.add(.{
.tag = tag,
.data = .{ .str_tok = .{
@@ -1422,10 +1422,10 @@ pub const Scope = struct {
pub fn addBreak(
gz: *GenZir,
- tag: zir.Inst.Tag,
- break_block: zir.Inst.Index,
- operand: zir.Inst.Ref,
- ) !zir.Inst.Index {
+ tag: Zir.Inst.Tag,
+ break_block: Zir.Inst.Index,
+ operand: Zir.Inst.Ref,
+ ) !Zir.Inst.Index {
return gz.addAsIndex(.{
.tag = tag,
.data = .{ .@"break" = .{
@@ -1437,10 +1437,10 @@ pub const Scope = struct {
pub fn addBin(
gz: *GenZir,
- tag: zir.Inst.Tag,
- lhs: zir.Inst.Ref,
- rhs: zir.Inst.Ref,
- ) !zir.Inst.Ref {
+ tag: Zir.Inst.Tag,
+ lhs: Zir.Inst.Ref,
+ rhs: Zir.Inst.Ref,
+ ) !Zir.Inst.Ref {
assert(lhs != .none);
assert(rhs != .none);
return gz.add(.{
@@ -1454,10 +1454,10 @@ pub const Scope = struct {
pub fn addDecl(
gz: *GenZir,
- tag: zir.Inst.Tag,
+ tag: Zir.Inst.Tag,
decl_index: u32,
src_node: ast.Node.Index,
- ) !zir.Inst.Ref {
+ ) !Zir.Inst.Ref {
return gz.add(.{
.tag = tag,
.data = .{ .pl_node = .{
@@ -1469,10 +1469,10 @@ pub const Scope = struct {
pub fn addNode(
gz: *GenZir,
- tag: zir.Inst.Tag,
+ tag: Zir.Inst.Tag,
/// Absolute node index. This function does the conversion to offset from Decl.
src_node: ast.Node.Index,
- ) !zir.Inst.Ref {
+ ) !Zir.Inst.Ref {
return gz.add(.{
.tag = tag,
.data = .{ .node = gz.astgen.decl.nodeIndexToRelative(src_node) },
@@ -1482,9 +1482,9 @@ pub const Scope = struct {
/// Asserts that `str` is 8 or fewer bytes.
pub fn addSmallStr(
gz: *GenZir,
- tag: zir.Inst.Tag,
+ tag: Zir.Inst.Tag,
str: []const u8,
- ) !zir.Inst.Ref {
+ ) !Zir.Inst.Ref {
var buf: [9]u8 = undefined;
mem.copy(u8, &buf, str);
buf[str.len] = 0;
@@ -1495,11 +1495,11 @@ pub const Scope = struct {
});
}
- /// Note that this returns a `zir.Inst.Index` not a ref.
+ /// Note that this returns a `Zir.Inst.Index` not a ref.
/// Does *not* append the block instruction to the scope.
/// Leaves the `payload_index` field undefined.
- pub fn addBlock(gz: *GenZir, tag: zir.Inst.Tag, node: ast.Node.Index) !zir.Inst.Index {
- const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
+ pub fn addBlock(gz: *GenZir, tag: Zir.Inst.Tag, node: ast.Node.Index) !Zir.Inst.Index {
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
const gpa = gz.astgen.mod.gpa;
try gz.astgen.instructions.append(gpa, .{
.tag = tag,
@@ -1511,12 +1511,12 @@ pub const Scope = struct {
return new_index;
}
- /// Note that this returns a `zir.Inst.Index` not a ref.
+ /// Note that this returns a `Zir.Inst.Index` not a ref.
/// Leaves the `payload_index` field undefined.
- pub fn addCondBr(gz: *GenZir, tag: zir.Inst.Tag, node: ast.Node.Index) !zir.Inst.Index {
+ pub fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: ast.Node.Index) !Zir.Inst.Index {
const gpa = gz.astgen.mod.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
- const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
try gz.astgen.instructions.append(gpa, .{
.tag = tag,
.data = .{ .pl_node = .{
@@ -1528,16 +1528,16 @@ pub const Scope = struct {
return new_index;
}
- pub fn add(gz: *GenZir, inst: zir.Inst) !zir.Inst.Ref {
+ pub fn add(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Ref {
return gz.astgen.indexToRef(try gz.addAsIndex(inst));
}
- pub fn addAsIndex(gz: *GenZir, inst: zir.Inst) !zir.Inst.Index {
+ pub fn addAsIndex(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Index {
const gpa = gz.astgen.mod.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.astgen.instructions.ensureCapacity(gpa, gz.astgen.instructions.len + 1);
- const new_index = @intCast(zir.Inst.Index, gz.astgen.instructions.len);
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(inst);
gz.instructions.appendAssumeCapacity(new_index);
return new_index;
@@ -1554,7 +1554,7 @@ pub const Scope = struct {
parent: *Scope,
gen_zir: *GenZir,
name: []const u8,
- inst: zir.Inst.Ref,
+ inst: Zir.Inst.Ref,
/// Source location of the corresponding variable declaration.
src: LazySrcLoc,
};
@@ -1569,7 +1569,7 @@ pub const Scope = struct {
parent: *Scope,
gen_zir: *GenZir,
name: []const u8,
- ptr: zir.Inst.Ref,
+ ptr: Zir.Inst.Ref,
/// Source location of the corresponding variable declaration.
src: LazySrcLoc,
};
@@ -2511,7 +2511,7 @@ fn astgenAndSemaDecl(mod: *Module, decl: *Decl) !bool {
var analysis_arena = std.heap.ArenaAllocator.init(mod.gpa);
defer analysis_arena.deinit();
- var code: zir.Code = blk: {
+ var code: Zir = blk: {
var astgen = try AstGen.init(mod, decl, &analysis_arena.allocator);
defer astgen.deinit();
@@ -2578,7 +2578,7 @@ fn astgenAndSemaDecl(mod: *Module, decl: *Decl) !bool {
var analysis_arena = std.heap.ArenaAllocator.init(mod.gpa);
defer analysis_arena.deinit();
- var code: zir.Code = blk: {
+ var code: Zir = blk: {
var astgen = try AstGen.init(mod, decl, &analysis_arena.allocator);
defer astgen.deinit();
@@ -2676,7 +2676,7 @@ fn astgenAndSemaFn(
}
break :blk count;
};
- const param_types = try fn_type_scope_arena.allocator.alloc(zir.Inst.Ref, param_count);
+ const param_types = try fn_type_scope_arena.allocator.alloc(Zir.Inst.Ref, param_count);
var is_var_args = false;
{
@@ -2782,7 +2782,7 @@ fn astgenAndSemaFn(
else
false;
- const cc: zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0)
+ const cc: Zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0)
// TODO instead of enum literal type, this needs to be the
// std.builtin.CallingConvention enum. We need to implement importing other files
// and enums in order to fix this.
@@ -2797,8 +2797,8 @@ fn astgenAndSemaFn(
else
.none;
- const fn_type_inst: zir.Inst.Ref = if (cc != .none) fn_type: {
- const tag: zir.Inst.Tag = if (is_var_args) .fn_type_cc_var_args else .fn_type_cc;
+ const fn_type_inst: Zir.Inst.Ref = if (cc != .none) fn_type: {
+ const tag: Zir.Inst.Tag = if (is_var_args) .fn_type_cc_var_args else .fn_type_cc;
break :fn_type try fn_type_scope.addFnTypeCc(tag, .{
.src_node = fn_proto.ast.proto_node,
.ret_ty = return_type_inst,
@@ -2806,7 +2806,7 @@ fn astgenAndSemaFn(
.cc = cc,
});
} else fn_type: {
- const tag: zir.Inst.Tag = if (is_var_args) .fn_type_var_args else .fn_type;
+ const tag: Zir.Inst.Tag = if (is_var_args) .fn_type_var_args else .fn_type;
break :fn_type try fn_type_scope.addFnType(tag, .{
.src_node = fn_proto.ast.proto_node,
.ret_ty = return_type_inst,
@@ -2890,10 +2890,10 @@ fn astgenAndSemaFn(
const new_func = try decl_arena.allocator.create(Fn);
const fn_payload = try decl_arena.allocator.create(Value.Payload.Function);
- const fn_zir: zir.Code = blk: {
+ const fn_zir: Zir = blk: {
// We put the ZIR inside the Decl arena.
var astgen = try AstGen.init(mod, decl, &decl_arena.allocator);
- astgen.ref_start_index = @intCast(u32, zir.Inst.Ref.typed_value_map.len + param_count);
+ astgen.ref_start_index = @intCast(u32, Zir.Inst.Ref.typed_value_map.len + param_count);
defer astgen.deinit();
var gen_scope: Scope.GenZir = .{
@@ -2920,7 +2920,7 @@ fn astgenAndSemaFn(
.gen_zir = &gen_scope,
.name = param_name,
// Implicit const list first, then implicit arg list.
- .inst = @intToEnum(zir.Inst.Ref, @intCast(u32, zir.Inst.Ref.typed_value_map.len + i)),
+ .inst = @intToEnum(Zir.Inst.Ref, @intCast(u32, Zir.Inst.Ref.typed_value_map.len + i)),
.src = decl.tokSrcLoc(name_token),
};
params_scope = &sub_scope.base;
diff --git a/src/Sema.zig b/src/Sema.zig
index 35edf86de8..8a6c64046d 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1,6 +1,6 @@
//! Semantic analysis of ZIR instructions.
//! Shared to every Block. Stored on the stack.
-//! State used for compiling a `zir.Code` into TZIR.
+//! State used for compiling a `Zir` into TZIR.
//! Transforms untyped ZIR instructions into semantically-analyzed TZIR instructions.
//! Does type checking, comptime control flow, and safety-check generation.
//! This is the heart of the Zig compiler.
@@ -10,7 +10,7 @@ mod: *Module,
gpa: *Allocator,
/// Points to the arena allocator of the Decl.
arena: *Allocator,
-code: zir.Code,
+code: Zir,
/// Maps ZIR to TZIR.
inst_map: []*Inst,
/// When analyzing an inline function call, owner_decl is the Decl of the caller
@@ -52,7 +52,7 @@ const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const ir = @import("ir.zig");
-const zir = @import("zir.zig");
+const Zir = @import("zir.zig"); // TODO rename to Zir.zig
const Module = @import("Module.zig");
const Inst = ir.Inst;
const Body = ir.Body;
@@ -64,14 +64,14 @@ const LazySrcLoc = Module.LazySrcLoc;
const RangeSet = @import("RangeSet.zig");
const AstGen = @import("AstGen.zig");
-pub fn root(sema: *Sema, root_block: *Scope.Block) !zir.Inst.Index {
+pub fn root(sema: *Sema, root_block: *Scope.Block) !Zir.Inst.Index {
const inst_data = sema.code.instructions.items(.data)[0].pl_node;
- const extra = sema.code.extraData(zir.Inst.Block, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const root_body = sema.code.extra[extra.end..][0..extra.data.body_len];
return sema.analyzeBody(root_block, root_body);
}
-pub fn rootAsRef(sema: *Sema, root_block: *Scope.Block) !zir.Inst.Ref {
+pub fn rootAsRef(sema: *Sema, root_block: *Scope.Block) !Zir.Inst.Ref {
const break_inst = try sema.root(root_block);
return sema.code.instructions.items(.data)[break_inst].@"break".operand;
}
@@ -89,7 +89,7 @@ pub fn rootAsType(sema: *Sema, root_block: *Scope.Block) !Type {
/// Returns only the result from the body that is specified.
/// Only appropriate to call when it is determined at comptime that this body
/// has no peers.
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const zir.Inst.Index) InnerError!*Inst {
+fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!*Inst {
const break_inst = try sema.analyzeBody(block, body);
const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand;
return sema.resolveInst(operand_ref);
@@ -99,22 +99,22 @@ fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const zir.Inst.Index) I
/// return type of `analyzeBody` so that we can tail call them.
/// Only appropriate to return when the instruction is known to be NoReturn
/// solely based on the ZIR tag.
-const always_noreturn: InnerError!zir.Inst.Index = @as(zir.Inst.Index, undefined);
+const always_noreturn: InnerError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined);
/// This function is the main loop of `Sema` and it can be used in two different ways:
/// * The traditional way where there are N breaks out of the block and peer type
-/// resolution is done on the break operands. In this case, the `zir.Inst.Index`
+/// resolution is done on the break operands. In this case, the `Zir.Inst.Index`
/// part of the return value will be `undefined`, and callsites should ignore it,
/// finding the block result value via the block scope.
/// * The "flat" way. There is only 1 break out of the block, and it is with a `break_inline`
-/// instruction. In this case, the `zir.Inst.Index` part of the return value will be
+/// instruction. In this case, the `Zir.Inst.Index` part of the return value will be
/// the break instruction. This communicates both which block the break applies to, as
/// well as the operand. No block scope needs to be created for this strategy.
pub fn analyzeBody(
sema: *Sema,
block: *Scope.Block,
- body: []const zir.Inst.Index,
-) InnerError!zir.Inst.Index {
+ body: []const Zir.Inst.Index,
+) InnerError!Zir.Inst.Index {
// No tracy calls here, to avoid interfering with the tail call mechanism.
const map = block.sema.inst_map;
@@ -368,7 +368,7 @@ pub fn analyzeBody(
.block_inline => blk: {
// Directly analyze the block body without introducing a new block.
const inst_data = datas[inst].pl_node;
- const extra = sema.code.extraData(zir.Inst.Block, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const break_inst = try sema.analyzeBody(block, inline_body);
const break_data = datas[break_inst].@"break";
@@ -381,7 +381,7 @@ pub fn analyzeBody(
.condbr_inline => blk: {
const inst_data = datas[inst].pl_node;
const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.CondBr, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition);
@@ -401,19 +401,19 @@ pub fn analyzeBody(
}
/// TODO when we rework TZIR memory layout, this function will no longer have a possible error.
-pub fn resolveInst(sema: *Sema, zir_ref: zir.Inst.Ref) error{OutOfMemory}!*ir.Inst {
+pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!*ir.Inst {
var i: usize = @enumToInt(zir_ref);
// First section of indexes correspond to a set number of constant values.
- if (i < zir.Inst.Ref.typed_value_map.len) {
+ if (i < Zir.Inst.Ref.typed_value_map.len) {
// TODO when we rework TZIR memory layout, this function can be as simple as:
- // if (zir_ref < zir.const_inst_list.len + sema.param_count)
+ // if (zir_ref < Zir.const_inst_list.len + sema.param_count)
// return zir_ref;
// Until then we allocate memory for a new, mutable `ir.Inst` to match what
// TZIR expects.
- return sema.mod.constInst(sema.arena, .unneeded, zir.Inst.Ref.typed_value_map[i]);
+ return sema.mod.constInst(sema.arena, .unneeded, Zir.Inst.Ref.typed_value_map[i]);
}
- i -= zir.Inst.Ref.typed_value_map.len;
+ i -= Zir.Inst.Ref.typed_value_map.len;
// Next section of indexes correspond to function parameters, if any.
if (i < sema.param_inst_list.len) {
@@ -429,7 +429,7 @@ fn resolveConstString(
sema: *Sema,
block: *Scope.Block,
src: LazySrcLoc,
- zir_ref: zir.Inst.Ref,
+ zir_ref: Zir.Inst.Ref,
) ![]u8 {
const tzir_inst = try sema.resolveInst(zir_ref);
const wanted_type = Type.initTag(.const_slice_u8);
@@ -438,7 +438,7 @@ fn resolveConstString(
return val.toAllocatedBytes(sema.arena);
}
-fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: zir.Inst.Ref) !Type {
+fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
const tzir_inst = try sema.resolveInst(zir_ref);
const wanted_type = Type.initTag(.@"type");
const coerced_inst = try sema.coerce(block, wanted_type, tzir_inst, src);
@@ -476,7 +476,7 @@ fn resolveAlreadyCoercedInt(
sema: *Sema,
block: *Scope.Block,
src: LazySrcLoc,
- zir_ref: zir.Inst.Ref,
+ zir_ref: Zir.Inst.Ref,
comptime Int: type,
) !Int {
comptime assert(@typeInfo(Int).Int.bits <= 64);
@@ -492,7 +492,7 @@ fn resolveInt(
sema: *Sema,
block: *Scope.Block,
src: LazySrcLoc,
- zir_ref: zir.Inst.Ref,
+ zir_ref: Zir.Inst.Ref,
dest_type: Type,
) !u64 {
const tzir_inst = try sema.resolveInst(zir_ref);
@@ -506,7 +506,7 @@ fn resolveInstConst(
sema: *Sema,
block: *Scope.Block,
src: LazySrcLoc,
- zir_ref: zir.Inst.Ref,
+ zir_ref: Zir.Inst.Ref,
) InnerError!TypedValue {
const tzir_inst = try sema.resolveInst(zir_ref);
const val = try sema.resolveConstValue(block, src, tzir_inst);
@@ -516,13 +516,13 @@ fn resolveInstConst(
};
}
-fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zir_sema.zirBitcastResultPtr", .{});
}
-fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirCoerceResultPtr", .{});
@@ -531,7 +531,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) In
fn zirStructDecl(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
layout: std.builtin.TypeInfo.ContainerLayout,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -540,7 +540,7 @@ fn zirStructDecl(
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.StructDecl, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.StructDecl, inst_data.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
const fields_len = extra.data.fields_len;
@@ -650,7 +650,7 @@ fn zirStructDecl(
const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
extra_index += 1;
- const field_type_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const field_type_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
// This string needs to outlive the ZIR code.
@@ -669,7 +669,7 @@ fn zirStructDecl(
};
if (has_align) {
- const align_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
// TODO: if we need to report an error here, use a source location
// that points to this alignment expression rather than the struct.
@@ -677,7 +677,7 @@ fn zirStructDecl(
gop.entry.value.abi_align = (try sema.resolveInstConst(block, src, align_ref)).val;
}
if (has_default) {
- const default_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const default_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
// TODO: if we need to report an error here, use a source location
// that points to this default value expression rather than the struct.
@@ -692,7 +692,7 @@ fn zirStructDecl(
fn zirEnumDecl(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
nonexhaustive: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -701,7 +701,7 @@ fn zirEnumDecl(
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.EnumDecl, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.EnumDecl, inst_data.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
const fields_len = extra.data.fields_len;
@@ -842,7 +842,7 @@ fn zirEnumDecl(
assert(!gop.found_existing);
if (has_tag_value) {
- const tag_val_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const tag_val_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
// TODO: if we need to report an error here, use a source location
// that points to this default value expression rather than the struct.
@@ -858,29 +858,29 @@ fn zirEnumDecl(
return sema.analyzeDeclVal(block, src, new_decl);
}
-fn zirUnionDecl(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirUnionDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.Block, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
return sema.mod.fail(&block.base, sema.src, "TODO implement zirUnionDecl", .{});
}
-fn zirOpaqueDecl(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirOpaqueDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.Block, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
return sema.mod.fail(&block.base, sema.src, "TODO implement zirOpaqueDecl", .{});
}
-fn zirRetPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirRetPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -892,7 +892,7 @@ fn zirRetPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!
return block.addNoOp(src, ptr_type, .alloc);
}
-fn zirRef(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -901,7 +901,7 @@ fn zirRef(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*In
return sema.analyzeRef(block, inst_data.src(), operand);
}
-fn zirRetType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirRetType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -912,7 +912,7 @@ fn zirRetType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
return sema.mod.constType(sema.arena, src, ret_type);
}
-fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -935,7 +935,7 @@ fn ensureResultUsed(
}
}
-fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -948,7 +948,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Inde
}
}
-fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -982,7 +982,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) In
return sema.analyzeLoad(block, src, result_ptr, result_ptr.src);
}
-fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -995,7 +995,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*
return block.addNoOp(var_decl_src, ptr_type, .alloc);
}
-fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1012,7 +1012,7 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErro
fn zirAllocInferred(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
inferred_alloc_ty: Type,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -1038,7 +1038,7 @@ fn zirAllocInferred(
return result;
}
-fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1064,7 +1064,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Inde
ptr.tag = .alloc;
}
-fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1072,26 +1072,26 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Ind
const mod = sema.mod;
const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
const struct_init_src = validate_inst.src();
- const validate_extra = sema.code.extraData(zir.Inst.Block, validate_inst.payload_index);
+ const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len];
const struct_obj: *Module.Struct = s: {
const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
- const field_ptr_extra = sema.code.extraData(zir.Inst.Field, field_ptr_data.payload_index).data;
+ const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const object_ptr = try sema.resolveInst(field_ptr_extra.lhs);
break :s object_ptr.ty.elemType().castTag(.@"struct").?.data;
};
// Maps field index to field_ptr index of where it was already initialized.
- const found_fields = try gpa.alloc(zir.Inst.Index, struct_obj.fields.entries.items.len);
+ const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.entries.items.len);
defer gpa.free(found_fields);
- mem.set(zir.Inst.Index, found_fields, 0);
+ mem.set(Zir.Inst.Index, found_fields, 0);
for (instrs) |field_ptr| {
const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_ptr_data.src_node };
- const field_ptr_extra = sema.code.extraData(zir.Inst.Field, field_ptr_data.payload_index).data;
+ const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start);
const field_index = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadFieldAccess(block, struct_obj, field_src, field_name);
@@ -1164,7 +1164,7 @@ fn failWithBadFieldAccess(
return mod.failWithOwnedErrorMsg(&block.base, msg);
}
-fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1180,7 +1180,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) In
return sema.storePtr(block, src, bitcasted_ptr, value);
}
-fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1199,7 +1199,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index)
return sema.storePtr(block, src, bitcasted_ptr, value);
}
-fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
try sema.requireFunctionBlock(block, src);
@@ -1208,7 +1208,7 @@ fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index)
sema.branch_quota = quota;
}
-fn zirStore(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1218,19 +1218,19 @@ fn zirStore(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!v
return sema.storePtr(block, sema.src, ptr, value);
}
-fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ptr = try sema.resolveInst(extra.lhs);
const value = try sema.resolveInst(extra.rhs);
return sema.storePtr(block, src, ptr, value);
}
-fn zirParamType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1266,7 +1266,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErr
return sema.mod.constType(sema.arena, src, param_type);
}
-fn zirStr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1292,7 +1292,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*In
return sema.analyzeDeclRef(block, .unneeded, new_decl);
}
-fn zirInt(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1300,7 +1300,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*In
return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int);
}
-fn zirFloat(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].float;
const src = inst_data.src();
@@ -1312,10 +1312,10 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*
});
}
-fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const extra = sema.code.extraData(zir.Inst.Float128, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
const src = inst_data.src();
const number = extra.get();
@@ -1325,7 +1325,7 @@ fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErro
});
}
-fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Index {
+fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
@@ -1336,13 +1336,13 @@ fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) Inner
return sema.mod.fail(&block.base, src, "{s}", .{msg});
}
-fn zirCompileLog(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirCompileLog(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
var managed = sema.mod.compile_log_text.toManaged(sema.gpa);
defer sema.mod.compile_log_text = managed.moveToUnmanaged();
const writer = managed.writer();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const extra = sema.code.extraData(zir.Inst.MultiOp, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const args = sema.code.refSlice(extra.end, extra.data.operands_len);
for (args) |arg_ref, i| {
@@ -1363,7 +1363,7 @@ fn zirCompileLog(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
}
}
-fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Index {
+fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
@@ -1373,13 +1373,13 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!
return always_noreturn;
}
-fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.Block, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
// TZIR expects a block outside the loop block too.
@@ -1434,13 +1434,13 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: zir.Inst.Index) InnerE
return sema.analyzeBlockBody(parent_block, src, &child_block, merges);
}
-fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.Block, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
// Reserve space for a Block instruction so that generated Break instructions can
@@ -1566,12 +1566,12 @@ fn analyzeBlockBody(
return &merges.block_inst.base;
}
-fn zirExport(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
@@ -1588,7 +1588,7 @@ fn zirExport(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!
try sema.mod.analyzeExport(&block.base, src, export_name, actual_fn.owner_decl);
}
-fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1598,7 +1598,7 @@ fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
_ = try block.addNoOp(src, Type.initTag(.void), .breakpoint);
}
-fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Index {
+fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
@@ -1638,7 +1638,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: zir.Inst.Index) InnerE
}
}
-fn zirDbgStmtNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirDbgStmtNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1656,21 +1656,21 @@ fn zirDbgStmtNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerE
_ = try block.addDbgStmt(src, abs_byte_off);
}
-fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const decl = sema.owner_decl.dependencies.entries.items[inst_data.payload_index].key;
return sema.analyzeDeclRef(block, src, decl);
}
-fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const decl = sema.owner_decl.dependencies.entries.items[inst_data.payload_index].key;
return sema.analyzeDeclVal(block, src, decl);
}
-fn zirDeclRefNamed(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirDeclRefNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const src = inst_data.src();
const decl_name = inst_data.get(sema.code);
@@ -1678,7 +1678,7 @@ fn zirDeclRefNamed(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) Inner
return sema.analyzeDeclRef(block, src, decl);
}
-fn zirDeclValNamed(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirDeclValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const src = inst_data.src();
const decl_name = inst_data.get(sema.code);
@@ -1701,7 +1701,7 @@ fn lookupIdentifier(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, name: []c
fn zirCallNone(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
ensure_result_used: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -1716,7 +1716,7 @@ fn zirCallNone(
fn zirCall(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
modifier: std.builtin.CallOptions.Modifier,
ensure_result_used: bool,
) InnerError!*Inst {
@@ -1726,7 +1726,7 @@ fn zirCall(
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const func_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
const call_src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.Call, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index);
const args = sema.code.refSlice(extra.end, extra.data.args_len);
return sema.analyzeCall(block, extra.data.callee, func_src, call_src, modifier, ensure_result_used, args);
@@ -1735,12 +1735,12 @@ fn zirCall(
fn analyzeCall(
sema: *Sema,
block: *Scope.Block,
- zir_func: zir.Inst.Ref,
+ zir_func: Zir.Inst.Ref,
func_src: LazySrcLoc,
call_src: LazySrcLoc,
modifier: std.builtin.CallOptions.Modifier,
ensure_result_used: bool,
- zir_args: []const zir.Inst.Ref,
+ zir_args: []const Zir.Inst.Ref,
) InnerError!*ir.Inst {
const func = try sema.resolveInst(zir_func);
@@ -1886,7 +1886,7 @@ fn analyzeCall(
return result;
}
-fn zirIntType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1897,7 +1897,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
return sema.mod.constType(sema.arena, src, ty);
}
-fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1909,7 +1909,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) Inner
return sema.mod.constType(sema.arena, src, opt_type);
}
-fn zirOptionalTypeFromPtrElem(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirOptionalTypeFromPtrElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1921,7 +1921,7 @@ fn zirOptionalTypeFromPtrElem(sema: *Sema, block: *Scope.Block, inst: zir.Inst.I
return sema.mod.constType(sema.arena, inst_data.src(), opt_ty);
}
-fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1934,14 +1934,14 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErr
return sema.mod.constType(sema.arena, .unneeded, array_ty);
}
-fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
// TODO these should be lazily evaluated
const inst_data = sema.code.instructions.items(.data)[inst].array_type_sentinel;
const len = try sema.resolveInstConst(block, .unneeded, inst_data.len);
- const extra = sema.code.extraData(zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data;
const sentinel = try sema.resolveInstConst(block, .unneeded, extra.sentinel);
const elem_type = try sema.resolveType(block, .unneeded, extra.elem_type);
const array_ty = try sema.mod.arrayType(sema.arena, len.val.toUnsignedInt(), sentinel.val, elem_type);
@@ -1949,12 +1949,12 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index)
return sema.mod.constType(sema.arena, .unneeded, array_ty);
}
-fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
@@ -1970,7 +1970,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) Inn
return sema.mod.constType(sema.arena, src, err_union_ty);
}
-fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -1988,7 +1988,7 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
});
}
-fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2014,7 +2014,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
return block.addUnOp(src, Type.initTag(.u16), .error_to_int, op_coerced);
}
-fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2047,12 +2047,12 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
return block.addUnOp(src, Type.initTag(.anyerror), .int_to_error, op);
}
-fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
@@ -2126,7 +2126,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) Inn
});
}
-fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2139,7 +2139,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerE
});
}
-fn zirEnumLiteralSmall(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirEnumLiteralSmall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2152,7 +2152,7 @@ fn zirEnumLiteralSmall(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) I
});
}
-fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const mod = sema.mod;
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
@@ -2234,12 +2234,12 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErr
return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag);
}
-fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const mod = sema.mod;
const target = mod.getTarget();
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
@@ -2293,7 +2293,7 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErr
fn zirOptionalPayloadPtr(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
safety_check: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -2336,7 +2336,7 @@ fn zirOptionalPayloadPtr(
fn zirOptionalPayload(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
safety_check: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -2374,7 +2374,7 @@ fn zirOptionalPayload(
fn zirErrUnionPayload(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
safety_check: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -2408,7 +2408,7 @@ fn zirErrUnionPayload(
fn zirErrUnionPayloadPtr(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
safety_check: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -2449,7 +2449,7 @@ fn zirErrUnionPayloadPtr(
}
/// Value in, value out
-fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2473,7 +2473,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) Inner
}
/// Pointer in, value out
-fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2499,7 +2499,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) In
return block.addUnOp(src, operand.ty.castTag(.error_union).?.data.payload, .unwrap_errunion_err_ptr, operand);
}
-fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!void {
+fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -2513,13 +2513,13 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Inde
}
}
-fn zirFnType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index, var_args: bool) InnerError!*Inst {
+fn zirFnType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, var_args: bool) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.FnType, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.FnType, inst_data.payload_index);
const param_types = sema.code.refSlice(extra.end, extra.data.param_types_len);
return sema.fnTypeCommon(
@@ -2532,14 +2532,14 @@ fn zirFnType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index, var_args: b
);
}
-fn zirFnTypeCc(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index, var_args: bool) InnerError!*Inst {
+fn zirFnTypeCc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, var_args: bool) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.FnTypeCc, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.FnTypeCc, inst_data.payload_index);
const param_types = sema.code.refSlice(extra.end, extra.data.param_types_len);
const cc_tv = try sema.resolveInstConst(block, cc_src, extra.data.cc);
@@ -2562,8 +2562,8 @@ fn fnTypeCommon(
sema: *Sema,
block: *Scope.Block,
src_node_offset: i32,
- zir_param_types: []const zir.Inst.Ref,
- zir_return_type: zir.Inst.Ref,
+ zir_param_types: []const Zir.Inst.Ref,
+ zir_return_type: Zir.Inst.Ref,
cc: std.builtin.CallingConvention,
var_args: bool,
) InnerError!*Inst {
@@ -2608,7 +2608,7 @@ fn fnTypeCommon(
return sema.mod.constType(sema.arena, src, fn_ty);
}
-fn zirAs(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2616,13 +2616,13 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Ins
return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs);
}
-fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.As, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
return sema.analyzeAs(block, src, extra.dest_type, extra.operand);
}
@@ -2630,15 +2630,15 @@ fn analyzeAs(
sema: *Sema,
block: *Scope.Block,
src: LazySrcLoc,
- zir_dest_type: zir.Inst.Ref,
- zir_operand: zir.Inst.Ref,
+ zir_dest_type: Zir.Inst.Ref,
+ zir_operand: Zir.Inst.Ref,
) InnerError!*Inst {
const dest_type = try sema.resolveType(block, src, zir_dest_type);
const operand = try sema.resolveInst(zir_operand);
return sema.coerce(block, dest_type, operand, src);
}
-fn zirPtrtoint(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirPtrtoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2655,14 +2655,14 @@ fn zirPtrtoint(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErro
return block.addUnOp(src, ty, .ptrtoint, ptr);
}
-fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Field, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(extra.field_name_start);
const object = try sema.resolveInst(extra.lhs);
const object_ptr = if (object.ty.zigTypeTag() == .Pointer)
@@ -2673,27 +2673,27 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErro
return sema.analyzeLoad(block, src, result_ptr, result_ptr.src);
}
-fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Field, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(extra.field_name_start);
const object_ptr = try sema.resolveInst(extra.lhs);
return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src);
}
-fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.FieldNamed, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
const object = try sema.resolveInst(extra.lhs);
const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name);
const object_ptr = try sema.analyzeRef(block, src, object);
@@ -2701,20 +2701,20 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) Inne
return sema.analyzeLoad(block, src, result_ptr, src);
}
-fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.FieldNamed, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
const object_ptr = try sema.resolveInst(extra.lhs);
const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name);
return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src);
}
-fn zirIntcast(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirIntcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2722,7 +2722,7 @@ fn zirIntcast(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
@@ -2757,7 +2757,7 @@ fn zirIntcast(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{});
}
-fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2765,14 +2765,14 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
return sema.bitcast(block, dest_type, operand);
}
-fn zirFloatcast(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirFloatcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2780,7 +2780,7 @@ fn zirFloatcast(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErr
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
@@ -2815,7 +2815,7 @@ fn zirFloatcast(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErr
return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{});
}
-fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2830,14 +2830,14 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
return sema.analyzeLoad(block, sema.src, result_ptr, sema.src);
}
-fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const array_ptr = if (array.ty.zigTypeTag() == .Pointer)
array
@@ -2848,7 +2848,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerE
return sema.analyzeLoad(block, src, result_ptr, src);
}
-fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2858,39 +2858,39 @@ fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src);
}
-fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src);
}
-fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.SliceStart, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const start = try sema.resolveInst(extra.start);
return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded);
}
-fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.SliceEnd, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const start = try sema.resolveInst(extra.start);
const end = try sema.resolveInst(extra.end);
@@ -2898,14 +2898,14 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErro
return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded);
}
-fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.SliceSentinel, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const start = try sema.resolveInst(extra.start);
const end = try sema.resolveInst(extra.end);
@@ -2917,7 +2917,7 @@ fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) Inne
fn zirSwitchCapture(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
is_multi: bool,
is_ref: bool,
) InnerError!*Inst {
@@ -2935,7 +2935,7 @@ fn zirSwitchCapture(
fn zirSwitchCaptureElse(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
is_ref: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -2952,9 +2952,9 @@ fn zirSwitchCaptureElse(
fn zirSwitchBlock(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
is_ref: bool,
- special_prong: zir.SpecialProng,
+ special_prong: Zir.SpecialProng,
) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2962,7 +2962,7 @@ fn zirSwitchBlock(
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.SwitchBlock, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
const operand_ptr = try sema.resolveInst(extra.data.operand);
const operand = if (is_ref)
@@ -2985,9 +2985,9 @@ fn zirSwitchBlock(
fn zirSwitchBlockMulti(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
is_ref: bool,
- special_prong: zir.SpecialProng,
+ special_prong: Zir.SpecialProng,
) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -2995,7 +2995,7 @@ fn zirSwitchBlockMulti(
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.SwitchBlockMulti, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index);
const operand_ptr = try sema.resolveInst(extra.data.operand);
const operand = if (is_ref)
@@ -3020,16 +3020,16 @@ fn analyzeSwitch(
block: *Scope.Block,
operand: *Inst,
extra_end: usize,
- special_prong: zir.SpecialProng,
+ special_prong: Zir.SpecialProng,
scalar_cases_len: usize,
multi_cases_len: usize,
- switch_inst: zir.Inst.Index,
+ switch_inst: Zir.Inst.Index,
src_node_offset: i32,
) InnerError!*Inst {
const gpa = sema.gpa;
const mod = sema.mod;
- const special: struct { body: []const zir.Inst.Index, end: usize } = switch (special_prong) {
+ const special: struct { body: []const Zir.Inst.Index, end: usize } = switch (special_prong) {
.none => .{ .body = &.{}, .end = extra_end },
.under, .@"else" => blk: {
const body_len = sema.code.extra[extra_end];
@@ -3079,7 +3079,7 @@ fn analyzeSwitch(
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
@@ -3189,7 +3189,7 @@ fn analyzeSwitch(
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
@@ -3229,9 +3229,9 @@ fn analyzeSwitch(
var range_i: u32 = 0;
while (range_i < ranges_len) : (range_i += 1) {
- const item_first = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_first = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
- const item_last = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_last = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
try sema.validateSwitchRange(
@@ -3285,7 +3285,7 @@ fn analyzeSwitch(
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
@@ -3368,7 +3368,7 @@ fn analyzeSwitch(
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
@@ -3435,7 +3435,7 @@ fn analyzeSwitch(
{
var scalar_i: usize = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
@@ -3474,9 +3474,9 @@ fn analyzeSwitch(
var range_i: usize = 0;
while (range_i < ranges_len) : (range_i += 1) {
- const item_first = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_first = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
- const item_last = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_last = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
// Validation above ensured these will succeed.
@@ -3544,7 +3544,7 @@ fn analyzeSwitch(
var scalar_i: usize = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = sema.code.extra[extra_index];
extra_index += 1;
@@ -3597,9 +3597,9 @@ fn analyzeSwitch(
var range_i: usize = 0;
while (range_i < ranges_len) : (range_i += 1) {
- const first_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
- const last_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_index]);
+ const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const item_first = try sema.resolveInst(first_ref);
@@ -3696,7 +3696,7 @@ fn analyzeSwitch(
fn resolveSwitchItemVal(
sema: *Sema,
block: *Scope.Block,
- item_ref: zir.Inst.Ref,
+ item_ref: Zir.Inst.Ref,
switch_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
range_expand: AstGen.SwitchProngSrc.RangeExpand,
@@ -3720,8 +3720,8 @@ fn validateSwitchRange(
sema: *Sema,
block: *Scope.Block,
range_set: *RangeSet,
- first_ref: zir.Inst.Ref,
- last_ref: zir.Inst.Ref,
+ first_ref: Zir.Inst.Ref,
+ last_ref: Zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
@@ -3735,7 +3735,7 @@ fn validateSwitchItem(
sema: *Sema,
block: *Scope.Block,
range_set: *RangeSet,
- item_ref: zir.Inst.Ref,
+ item_ref: Zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
@@ -3748,7 +3748,7 @@ fn validateSwitchItemEnum(
sema: *Sema,
block: *Scope.Block,
seen_fields: []?AstGen.SwitchProngSrc,
- item_ref: zir.Inst.Ref,
+ item_ref: Zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
@@ -3815,7 +3815,7 @@ fn validateSwitchItemBool(
block: *Scope.Block,
true_count: *u8,
false_count: *u8,
- item_ref: zir.Inst.Ref,
+ item_ref: Zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
@@ -3837,7 +3837,7 @@ fn validateSwitchItemSparse(
sema: *Sema,
block: *Scope.Block,
seen_values: *ValueSrcMap,
- item_ref: zir.Inst.Ref,
+ item_ref: Zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: AstGen.SwitchProngSrc,
) InnerError!void {
@@ -3879,12 +3879,12 @@ fn validateSwitchNoRange(
return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
}
-fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
@@ -3907,7 +3907,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
return mod.constBool(arena, src, false);
}
-fn zirImport(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -3933,13 +3933,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!
return mod.constType(sema.arena, src, file.namespace.ty);
}
-fn zirShl(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{});
}
-fn zirShr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirShr", .{});
@@ -3948,7 +3948,7 @@ fn zirShr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*In
fn zirBitwise(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
ir_tag: ir.Inst.Tag,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -3958,7 +3958,7 @@ fn zirBitwise(
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
@@ -4011,19 +4011,19 @@ fn zirBitwise(
return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs);
}
-fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{});
}
-fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{});
}
-fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayMul", .{});
@@ -4032,8 +4032,8 @@ fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErro
fn zirNegate(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
- tag_override: zir.Inst.Tag,
+ inst: Zir.Inst.Index,
+ tag_override: Zir.Inst.Tag,
) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -4048,7 +4048,7 @@ fn zirNegate(
return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src);
}
-fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -4057,7 +4057,7 @@ fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
@@ -4067,7 +4067,7 @@ fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
fn analyzeArithmetic(
sema: *Sema,
block: *Scope.Block,
- zir_tag: zir.Inst.Tag,
+ zir_tag: Zir.Inst.Tag,
lhs: *Inst,
rhs: *Inst,
src: LazySrcLoc,
@@ -4174,7 +4174,7 @@ fn analyzeArithmetic(
return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs);
}
-fn zirLoad(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -4188,7 +4188,7 @@ fn zirLoad(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*I
fn zirAsm(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
is_volatile: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -4198,7 +4198,7 @@ fn zirAsm(
const src = inst_data.src();
const asm_source_src: LazySrcLoc = .{ .node_offset_asm_source = inst_data.src_node };
const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.Asm, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.Asm, inst_data.payload_index);
const return_type = try sema.resolveType(block, ret_ty_src, extra.data.return_type);
const asm_source = try sema.resolveConstString(block, asm_source_src, extra.data.asm_source);
@@ -4218,7 +4218,7 @@ fn zirAsm(
const clobbers = try sema.arena.alloc([]const u8, extra.data.clobbers_len);
for (args) |*arg| {
- arg.* = try sema.resolveInst(@intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]));
+ arg.* = try sema.resolveInst(@intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]));
extra_i += 1;
}
for (inputs) |*name| {
@@ -4253,7 +4253,7 @@ fn zirAsm(
fn zirCmp(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
op: std.math.CompareOperator,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -4262,7 +4262,7 @@ fn zirCmp(
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const extra = sema.code.extraData(zir.Inst.Bin, inst_data.payload_index).data;
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
@@ -4356,7 +4356,7 @@ fn zirCmp(
return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs);
}
-fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -4366,7 +4366,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!
return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size);
}
-fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -4376,20 +4376,20 @@ fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErr
return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), bit_size);
}
-fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeInfo", .{});
}
-fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
return sema.mod.constType(sema.arena, src, operand.ty);
}
-fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_ptr = try sema.resolveInst(inst_data.operand);
@@ -4397,13 +4397,13 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
return sema.mod.constType(sema.arena, src, elem_ty);
}
-fn zirTypeofPeer(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirTypeofPeer(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const extra = sema.code.extraData(zir.Inst.MultiOp, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const args = sema.code.refSlice(extra.end, extra.data.operands_len);
const inst_list = try sema.gpa.alloc(*ir.Inst, extra.data.operands_len);
@@ -4417,7 +4417,7 @@ fn zirTypeofPeer(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
return sema.mod.constType(sema.arena, src, result_type);
}
-fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -4437,7 +4437,7 @@ fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
fn zirBoolOp(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
comptime is_bool_or: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -4468,7 +4468,7 @@ fn zirBoolOp(
fn zirBoolBr(
sema: *Sema,
parent_block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
is_bool_or: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -4478,7 +4478,7 @@ fn zirBoolBr(
const inst_data = datas[inst].bool_br;
const src: LazySrcLoc = .unneeded;
const lhs = try sema.resolveInst(inst_data.lhs);
- const extra = sema.code.extraData(zir.Inst.Block, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
if (try sema.resolveDefinedValue(parent_block, src, lhs)) |lhs_val| {
@@ -4536,7 +4536,7 @@ fn zirBoolBr(
fn zirIsNull(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
invert_logic: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -4551,7 +4551,7 @@ fn zirIsNull(
fn zirIsNullPtr(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
invert_logic: bool,
) InnerError!*Inst {
const tracy = trace(@src());
@@ -4564,7 +4564,7 @@ fn zirIsNullPtr(
return sema.analyzeIsNull(block, src, loaded, invert_logic);
}
-fn zirIsErr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirIsErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -4573,7 +4573,7 @@ fn zirIsErr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*
return sema.analyzeIsErr(block, inst_data.src(), operand);
}
-fn zirIsErrPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirIsErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -4587,15 +4587,15 @@ fn zirIsErrPtr(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerErro
fn zirCondbr(
sema: *Sema,
parent_block: *Scope.Block,
- inst: zir.Inst.Index,
-) InnerError!zir.Inst.Index {
+ inst: Zir.Inst.Index,
+) InnerError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
- const extra = sema.code.extraData(zir.Inst.CondBr, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
@@ -4628,7 +4628,7 @@ fn zirCondbr(
return always_noreturn;
}
-fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Index {
+fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
@@ -4648,9 +4648,9 @@ fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerE
fn zirRetTok(
sema: *Sema,
block: *Scope.Block,
- inst: zir.Inst.Index,
+ inst: Zir.Inst.Index,
need_coercion: bool,
-) InnerError!zir.Inst.Index {
+) InnerError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
@@ -4661,7 +4661,7 @@ fn zirRetTok(
return sema.analyzeRet(block, operand, src, need_coercion);
}
-fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Index {
+fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
@@ -4678,7 +4678,7 @@ fn analyzeRet(
operand: *Inst,
src: LazySrcLoc,
need_coercion: bool,
-) InnerError!zir.Inst.Index {
+) InnerError!Zir.Inst.Index {
if (block.inlining) |inlining| {
// We are inlining a function call; rewrite the `ret` as a `break`.
try inlining.merges.results.append(sema.gpa, operand);
@@ -4702,7 +4702,7 @@ fn analyzeRet(
return always_noreturn;
}
-fn floatOpAllowed(tag: zir.Inst.Tag) bool {
+fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
// extend this switch as additional operators are implemented
return switch (tag) {
.add, .sub => true,
@@ -4710,7 +4710,7 @@ fn floatOpAllowed(tag: zir.Inst.Tag) bool {
};
}
-fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -4731,36 +4731,36 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) Inne
return sema.mod.constType(sema.arena, .unneeded, ty);
}
-fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
const src: LazySrcLoc = .unneeded;
const inst_data = sema.code.instructions.items(.data)[inst].ptr_type;
- const extra = sema.code.extraData(zir.Inst.PtrType, inst_data.payload_index);
+ const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);
var extra_i = extra.end;
const sentinel = if (inst_data.flags.has_sentinel) blk: {
- const ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]);
+ const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk (try sema.resolveInstConst(block, .unneeded, ref)).val;
} else null;
const abi_align = if (inst_data.flags.has_align) blk: {
- const ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]);
+ const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u32);
} else 0;
const bit_start = if (inst_data.flags.has_bit_range) blk: {
- const ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]);
+ const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16);
} else 0;
const bit_end = if (inst_data.flags.has_bit_range) blk: {
- const ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]);
+ const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16);
} else 0;
@@ -4785,7 +4785,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
return sema.mod.constType(sema.arena, src, ty);
}
-fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -4799,13 +4799,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) In
});
}
-fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInit", .{});
}
-fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst {
+fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldType", .{});
@@ -4895,7 +4895,7 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id:
try parent_block.instructions.append(sema.gpa, &block_inst.base);
}
-fn safetyPanic(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, panic_id: PanicId) !zir.Inst.Index {
+fn safetyPanic(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, panic_id: PanicId) !Zir.Inst.Index {
// TODO Once we have a panic function to call, call it here instead of breakpoint.
_ = try block.addNoOp(src, Type.initTag(.void), .breakpoint);
_ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach);
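
Every handler in the hunks above shares one decode shape; the commit only changes how the payload types are spelled, from `zir.Inst.*` to `Zir.Inst.*`. For reference, a condensed sketch of that shape using the `Bin` payload, mirroring the `zirArithmetic` hunk and assuming Sema.zig's existing top-level imports; the function name is hypothetical and the body is an illustration, not code added by this commit:

    fn zirBinOpExample(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
        // A pl_node instruction stores its operands in the `extra` array.
        const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
        const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
        const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
        const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
        const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
        // Operand refs are resolved to TZIR instructions before analysis proceeds.
        const lhs = try sema.resolveInst(extra.lhs);
        const rhs = try sema.resolveInst(extra.rhs);
        // `.add` stands in for whichever Zir.Inst.Tag the real handler dispatches on.
        return sema.analyzeArithmetic(block, .add, lhs, rhs, src, lhs_src, rhs_src);
    }
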
diff --git a/src/zir.zig b/src/zir.zig
index 44c22d41c7..bb1ac5fbc2 100644
--- a/src/zir.zig
+++ b/src/zir.zig
@@ -1,5 +1,14 @@
//! Zig Intermediate Representation. AstGen.zig converts AST nodes to these
//! untyped IR instructions. Next, Sema.zig processes these into TZIR.
+//! The minimum amount of information needed to represent a list of ZIR instructions.
+//! Once this structure is completed, it can be used to generate TZIR, followed by
+//! machine code, without any memory access into the AST tree token list, node list,
+//! or source bytes. Exceptions include:
+//! * Compile errors, which may need to reach into these data structures to
+//! create a useful report.
+//! * In the future, possibly inline assembly, which needs to get parsed and
+//! handled by the codegen backend, and errors reported there. However for now,
+//! inline assembly is not an exception.
const std = @import("std");
const mem = std.mem;
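
The relocated doc comment above now documents the file itself: since a Zig file is a struct, hoisting `Code`'s fields to the top level makes `@import("zir.zig")` the `Zir` type directly (note the new `const Zir = @This();` below). A minimal sketch of what a consumer ends up holding, assuming only the root-level fields and `deinit` visible in this diff; the helper is illustrative, not part of the commit:

    const std = @import("std");
    // The file is now itself the struct type (see `const Zir = @This();` below).
    const Zir = @import("zir.zig");

    // Hypothetical helper: inspect a finished Zir and release its buffers.
    fn consumeZir(gpa: *std.mem.Allocator, code: *Zir) void {
        _ = code.instructions.len; // std.MultiArrayList(Inst).Slice; index 0 is the implicit root block
        _ = code.string_bytes.len; // []u8, may contain embedded null bytes
        _ = code.extra.len;        // []u32, interpreted according to Inst.Tag
        code.deinit(gpa);          // frees instructions, string_bytes, and extra
    }
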
@@ -9,6 +18,7 @@ const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const ast = std.zig.ast;
+const Zir = @This();
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const TypedValue = @import("TypedValue.zig");
@@ -16,96 +26,85 @@ const ir = @import("ir.zig");
const Module = @import("Module.zig");
const LazySrcLoc = Module.LazySrcLoc;
-/// The minimum amount of information needed to represent a list of ZIR instructions.
-/// Once this structure is completed, it can be used to generate TZIR, followed by
-/// machine code, without any memory access into the AST tree token list, node list,
-/// or source bytes. Exceptions include:
-/// * Compile errors, which may need to reach into these data structures to
-/// create a useful report.
-/// * In the future, possibly inline assembly, which needs to get parsed and
-/// handled by the codegen backend, and errors reported there. However for now,
-/// inline assembly is not an exception.
-pub const Code = struct {
- /// There is always implicitly a `block` instruction at index 0.
- /// This is so that `break_inline` can break from the root block.
- instructions: std.MultiArrayList(Inst).Slice,
- /// In order to store references to strings in fewer bytes, we copy all
- /// string bytes into here. String bytes can be null. It is up to whoever
- /// is referencing the data here whether they want to store both index and length,
- /// thus allowing null bytes, or store only index, and use null-termination. The
- /// `string_bytes` array is agnostic to either usage.
- string_bytes: []u8,
- /// The meaning of this data is determined by `Inst.Tag` value.
- extra: []u32,
-
- /// Returns the requested data, as well as the new index which is at the start of the
- /// trailers for the object.
- pub fn extraData(code: Code, comptime T: type, index: usize) struct { data: T, end: usize } {
- const fields = std.meta.fields(T);
- var i: usize = index;
- var result: T = undefined;
- inline for (fields) |field| {
- @field(result, field.name) = switch (field.field_type) {
- u32 => code.extra[i],
- Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]),
- else => unreachable,
- };
- i += 1;
- }
- return .{
- .data = result,
- .end = i,
+/// There is always implicitly a `block` instruction at index 0.
+/// This is so that `break_inline` can break from the root block.
+instructions: std.MultiArrayList(Inst).Slice,
+/// In order to store references to strings in fewer bytes, we copy all
+/// string bytes into here. String bytes can be null. It is up to whoever
+/// is referencing the data here whether they want to store both index and length,
+/// thus allowing null bytes, or store only index, and use null-termination. The
+/// `string_bytes` array is agnostic to either usage.
+string_bytes: []u8,
+/// The meaning of this data is determined by `Inst.Tag` value.
+extra: []u32,
+
+/// Returns the requested data, as well as the new index which is at the start of the
+/// trailers for the object.
+pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, end: usize } {
+ const fields = std.meta.fields(T);
+ var i: usize = index;
+ var result: T = undefined;
+ inline for (fields) |field| {
+ @field(result, field.name) = switch (field.field_type) {
+ u32 => code.extra[i],
+ Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]),
+ else => unreachable,
};
+ i += 1;
}
+ return .{
+ .data = result,
+ .end = i,
+ };
+}
- /// Given an index into `string_bytes` returns the null-terminated string found there.
- pub fn nullTerminatedString(code: Code, index: usize) [:0]const u8 {
- var end: usize = index;
- while (code.string_bytes[end] != 0) {
- end += 1;
- }
- return code.string_bytes[index..end :0];
- }
-
- pub fn refSlice(code: Code, start: usize, len: usize) []Inst.Ref {
- const raw_slice = code.extra[start..][0..len];
- return @bitCast([]Inst.Ref, raw_slice);
- }
-
- pub fn deinit(code: *Code, gpa: *Allocator) void {
- code.instructions.deinit(gpa);
- gpa.free(code.string_bytes);
- gpa.free(code.extra);
- code.* = undefined;
+/// Given an index into `string_bytes` returns the null-terminated string found there.
+pub fn nullTerminatedString(code: Zir, index: usize) [:0]const u8 {
+ var end: usize = index;
+ while (code.string_bytes[end] != 0) {
+ end += 1;
}
+ return code.string_bytes[index..end :0];
+}
+
+pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref {
+ const raw_slice = code.extra[start..][0..len];
+ return @bitCast([]Inst.Ref, raw_slice);
+}
+
+pub fn deinit(code: *Zir, gpa: *Allocator) void {
+ code.instructions.deinit(gpa);
+ gpa.free(code.string_bytes);
+ gpa.free(code.extra);
+ code.* = undefined;
+}
+
+/// For debugging purposes, like dumpFn but for unanalyzed zir blocks
+pub fn dump(
+ code: Zir,
+ gpa: *Allocator,
+ kind: []const u8,
+ scope: *Module.Scope,
+ param_count: usize,
+) !void {
+ var arena = std.heap.ArenaAllocator.init(gpa);
+ defer arena.deinit();
+
+ var writer: Writer = .{
+ .gpa = gpa,
+ .arena = &arena.allocator,
+ .scope = scope,
+ .code = code,
+ .indent = 0,
+ .param_count = param_count,
+ };
- /// For debugging purposes, like dumpFn but for unanalyzed zir blocks
- pub fn dump(
- code: Code,
- gpa: *Allocator,
- kind: []const u8,
- scope: *Module.Scope,
- param_count: usize,
- ) !void {
- var arena = std.heap.ArenaAllocator.init(gpa);
- defer arena.deinit();
-
- var writer: Writer = .{
- .gpa = gpa,
- .arena = &arena.allocator,
- .scope = scope,
- .code = code,
- .indent = 0,
- .param_count = param_count,
- };
-
- const decl_name = scope.srcDecl().?.name;
- const stderr = std.io.getStdErr().writer();
- try stderr.print("ZIR {s} {s} %0 ", .{ kind, decl_name });
- try writer.writeInstToStream(stderr, 0);
- try stderr.print(" // end ZIR {s} {s}\n\n", .{ kind, decl_name });
- }
-};
+ const decl_name = scope.srcDecl().?.name;
+ const stderr = std.io.getStdErr().writer();
+ try stderr.print("ZIR {s} {s} %0 ", .{ kind, decl_name });
+ try writer.writeInstToStream(stderr, 0);
+ try stderr.print(" // end ZIR {s} {s}\n\n", .{ kind, decl_name });
+}
/// These are untyped instructions generated from an Abstract Syntax Tree.
/// The data here is immutable because it is possible to have multiple
@@ -885,7 +884,7 @@ pub const Inst = struct {
}
};
- /// The position of a ZIR instruction within the `Code` instructions array.
+ /// The position of a ZIR instruction within the `Zir` instructions array.
pub const Index = u32;
/// A reference to a TypedValue, parameter of the current function,
@@ -1236,7 +1235,7 @@ pub const Inst = struct {
/// Number of bytes in the string.
len: u32,
- pub fn get(self: @This(), code: Code) []const u8 {
+ pub fn get(self: @This(), code: Zir) []const u8 {
return code.string_bytes[self.start..][0..self.len];
}
},
@@ -1257,7 +1256,7 @@ pub const Inst = struct {
/// Offset from Decl AST token index.
src_tok: u32,
- pub fn get(self: @This(), code: Code) [:0]const u8 {
+ pub fn get(self: @This(), code: Zir) [:0]const u8 {
return code.nullTerminatedString(self.start);
}
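
The two `get` accessors in the hunks above correspond to the two usages the `string_bytes` doc comment permits: index plus length, which tolerates embedded null bytes, and index alone with null termination. A small sketch showing both side by side; the function and its parameters are hypothetical:

    // Illustrative only; mirrors the accessors above. `start` and `len` stand in
    // for the payload fields they index with.
    fn readStrings(code: Zir, start: u32, len: u32) void {
        // Index + length: the bytes may legally contain nulls.
        const bytes: []const u8 = code.string_bytes[start..][0..len];
        // Index only: scans forward to the null terminator.
        const name: [:0]const u8 = code.nullTerminatedString(start);
        _ = bytes;
        _ = name;
    }
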
@@ -1609,7 +1608,7 @@ const Writer = struct {
gpa: *Allocator,
arena: *Allocator,
scope: *Module.Scope,
- code: Code,
+ code: Zir,
indent: usize,
param_count: usize,