From e80702067959f99944adeb79a59c31699d7e278a Mon Sep 17 00:00:00 2001 From: Loris Cro Date: Mon, 19 Jul 2021 23:21:24 +0200 Subject: Fixed wrong "unable to load" error for non-existing import files - Changed ZIR encoding of `import` metadata from having instruction indexes to storing token indexes. --- src/Compilation.zig | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'src/Compilation.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 625de58c63..b9055eceed 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2315,7 +2315,7 @@ const AstGenSrc = union(enum) { root, import: struct { importing_file: *Module.Scope.File, - import_inst: Zir.Inst.Index, + import_tok: std.zig.ast.TokenIndex, }, }; @@ -2352,11 +2352,15 @@ fn workerAstGenFile( assert(file.zir_loaded); const imports_index = file.zir.extra[@enumToInt(Zir.ExtraIndex.imports)]; if (imports_index != 0) { - const imports_len = file.zir.extra[imports_index]; + const extra = file.zir.extraData(Zir.Inst.Imports, imports_index); + var import_i: u32 = 0; + var extra_index = extra.end; - for (file.zir.extra[imports_index + 1 ..][0..imports_len]) |import_inst| { - const inst_data = file.zir.instructions.items(.data)[import_inst].str_tok; - const import_path = inst_data.get(file.zir); + while (import_i < extra.data.imports_len) : (import_i += 1) { + const item = file.zir.extraData(Zir.Inst.Imports.Item, extra_index); + extra_index = item.end; + + const import_path = file.zir.nullTerminatedString(item.data.name); const import_result = blk: { const lock = comp.mutex.acquire(); @@ -2370,7 +2374,7 @@ fn workerAstGenFile( }); const sub_src: AstGenSrc = .{ .import = .{ .importing_file = file, - .import_inst = import_inst, + .import_tok = item.data.token, } }; wg.start(); comp.thread_pool.spawn(workerAstGenFile, .{ @@ -2602,12 +2606,11 @@ fn reportRetryableAstGenError( }, .import => |info| blk: { const importing_file = info.importing_file; - const import_inst = info.import_inst; - const inst_data = importing_file.zir.instructions.items(.data)[import_inst].str_tok; + break :blk .{ .file_scope = importing_file, .parent_decl_node = 0, - .lazy = .{ .token_offset = inst_data.src_tok }, + .lazy = .{ .token_abs = info.import_tok }, }; }, }; -- cgit v1.2.3 From ef7080aed1a1a4dc54cb837938e462b4e6720734 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Jul 2021 16:32:11 -0700 Subject: stage2: update Liveness, SPIR-V for new AIR memory layout also do the inline assembly instruction --- BRANCH_TODO | 44 ---- src/Air.zig | 60 ++++-- src/Compilation.zig | 57 +++-- src/Liveness.zig | 1 + src/Module.zig | 36 +++- src/Sema.zig | 563 +++++++++++++++++++++++++------------------------- src/codegen/spirv.zig | 411 ++++++++++++++++++------------------ 7 files changed, 595 insertions(+), 577 deletions(-) (limited to 'src/Compilation.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index 5bc4d2a2f5..3b946edbbd 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,24 +1,6 @@ * be sure to test debug info of parameters - /// Each bit represents the index of an `Inst` parameter in the `args` field. - /// If a bit is set, it marks the end of the lifetime of the corresponding - /// instruction parameter. For example, 0b101 means that the first and - /// third `Inst` parameters' lifetimes end after this instruction, and will - /// not have any more following references. 
- /// The most significant bit being set means that the instruction itself is - /// never referenced, in other words its lifetime ends as soon as it finishes. - /// If bit 15 (0b1xxx_xxxx_xxxx_xxxx) is set, it means this instruction itself is unreferenced. - /// If bit 14 (0bx1xx_xxxx_xxxx_xxxx) is set, it means this is a special case and the - /// lifetimes of operands are encoded elsewhere. - deaths: DeathsInt = undefined, - - - pub const DeathsInt = u16; - pub const DeathsBitIndex = std.math.Log2Int(DeathsInt); - pub const unreferenced_bit_index = @typeInfo(DeathsInt).Int.bits - 1; - pub const deaths_bits = unreferenced_bit_index - 1; - pub fn isUnused(self: Inst) bool { return (self.deaths & (1 << unreferenced_bit_index)) != 0; } @@ -115,32 +97,6 @@ - pub const Assembly = struct { - pub const base_tag = Tag.assembly; - - base: Inst, - asm_source: []const u8, - is_volatile: bool, - output_constraint: ?[]const u8, - inputs: []const []const u8, - clobbers: []const []const u8, - args: []const *Inst, - - pub fn operandCount(self: *const Assembly) usize { - return self.args.len; - } - pub fn getOperand(self: *const Assembly, index: usize) ?*Inst { - if (index < self.args.len) - return self.args[index]; - return null; - } - }; - - pub const StructFieldPtr = struct { - struct_ptr: *Inst, - field_index: usize, - }; - /// For debugging purposes, prints a function representation to stderr. pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void { diff --git a/src/Air.zig b/src/Air.zig index c57232fba0..112845559d 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1,5 +1,7 @@ //! Analyzed Intermediate Representation. -//! Sema inputs ZIR and outputs AIR. +//! This data is produced by Sema and consumed by codegen. +//! Unlike ZIR where there is one instance for an entire source file, each function +//! gets its own `Air` instance. const std = @import("std"); const Value = @import("value.zig").Value; @@ -27,38 +29,48 @@ pub const Inst = struct { data: Data, pub const Tag = enum(u8) { + /// The first N instructions in Air must be one arg instruction per function parameter. + /// Uses the `ty` field. + arg, /// Float or integer addition. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. add, /// Integer addition. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. addwrap, /// Float or integer subtraction. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. sub, /// Integer subtraction. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. subwrap, /// Float or integer multiplication. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. mul, /// Integer multiplication. 
Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. mulwrap, /// Integer or float division. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. div, /// Allocates stack local memory. /// Uses the `ty` field. alloc, - /// TODO + /// Inline assembly. Uses the `ty_pl` field. Payload is `Asm`. assembly, /// Bitwise AND. `&`. /// Result type is the same as both operands. @@ -80,7 +92,7 @@ pub const Inst = struct { /// Uses the `ty_pl` field with payload `Block`. block, /// Return from a block with a result. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `br` field. br, /// Lowers to a hardware trap instruction, or the next best thing. @@ -109,11 +121,11 @@ pub const Inst = struct { /// Uses the `bin_op` field. cmp_neq, /// Conditional branch. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`. cond_br, /// Switch branch. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`. switch_br, /// A comptime-known value. Uses the `ty_pl` field, payload is index of @@ -166,7 +178,7 @@ pub const Inst = struct { load, /// A labeled block of code that loops forever. At the end of the body it is implied /// to repeat; no explicit "repeat" instruction terminates loop bodies. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `ty_pl` field. Payload is `Block`. loop, /// Converts a pointer to its address. Result type is always `usize`. @@ -178,7 +190,7 @@ pub const Inst = struct { /// Uses the `ty_op` field. ref, /// Return a value from a function. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `un_op` field. ret, /// Returns a pointer to a global variable. @@ -189,7 +201,7 @@ pub const Inst = struct { /// Uses the `bin_op` field. store, /// Indicates the program counter will never get to this instruction. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. unreach, /// Convert from one float type to another. /// Uses the `ty_op` field. @@ -343,6 +355,16 @@ pub const StructField = struct { field_index: u32, }; +/// Trailing: +/// 0. `Ref` for every outputs_len +/// 1. `Ref` for every inputs_len +pub const Asm = struct { + /// Index to the corresponding ZIR instruction. + /// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and + /// clobbers are found via here. 
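
The trailing layout documented above can be decoded with the `extraData` helper added further down in this file. The following is a minimal sketch only, not part of the patch: `inst`, `outputs_len`, and `inputs_len` are assumed to be in scope (the lengths live in the ZIR instruction referenced by `zir_index`), and the `payload` field name on the `ty_pl` data is likewise assumed.

    const ty_pl = air.instructions.items(.data)[inst].ty_pl;
    const extra = air.extraData(Air.Asm, ty_pl.payload);
    // The trailing `Ref`s are stored as raw u32 words in `air.extra`,
    // outputs first, then inputs, starting at `extra.end`.
    const output_refs = air.extra[extra.end..][0..outputs_len];
    const input_refs = air.extra[extra.end + outputs_len ..][0..inputs_len];
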
+ zir_index: u32, +}; + pub fn getMainBody(air: Air) []const Air.Inst.Index { const body_index = air.extra[@enumToInt(ExtraIndex.main_block)]; const body_len = air.extra[body_index]; @@ -369,3 +391,11 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end .end = i, }; } + +pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void { + air.instructions.deinit(gpa); + gpa.free(air.extra); + gpa.free(air.values); + gpa.free(air.variables); + air.* = undefined; +} diff --git a/src/Compilation.zig b/src/Compilation.zig index b9055eceed..74ad7b2aae 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -13,7 +13,7 @@ const target_util = @import("target.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const trace = @import("tracy.zig").trace; -const liveness = @import("liveness.zig"); +const Liveness = @import("Liveness.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const glibc = @import("glibc.zig"); @@ -1922,6 +1922,7 @@ pub fn getCompileLogOutput(self: *Compilation) []const u8 { } pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemory }!void { + const gpa = self.gpa; // If the terminal is dumb, we dont want to show the user all the // output. var progress: std.Progress = .{ .dont_print_on_dumb = true }; @@ -2005,7 +2006,8 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor assert(decl.has_tv); if (decl.val.castTag(.function)) |payload| { const func = payload.data; - switch (func.state) { + + var air = switch (func.state) { .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { error.AnalysisFail => { assert(func.state != .in_progress); @@ -2016,18 +2018,39 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .in_progress => unreachable, .inline_only => unreachable, // don't queue work for this .sema_failure, .dependency_failure => continue, - .success => {}, - } - // Here we tack on additional allocations to the Decl's arena. The allocations - // are lifetime annotations in the ZIR. 
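
Taken together, the hunks in this region replace the old arena-stored body and liveness annotations with a self-contained per-function pipeline. A condensed sketch of the new flow, using only names that appear in this diff (state handling and error reporting omitted):

    var air = try module.analyzeFnBody(decl, func); // Sema now hands back a standalone Air
    defer air.deinit(gpa);                          // freed after codegen, not kept on the Fn
    var liveness = try Liveness.analyze(gpa, air);  // computed up front from the Air data
    defer liveness.deinit(gpa);
    try self.bin_file.updateFunc(module, func, air, liveness);
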
- var decl_arena = decl.value_arena.?.promote(module.gpa); - defer decl.value_arena.?.* = decl_arena.state; + .success => unreachable, // don't queue it twice + }; + defer air.deinit(gpa); + log.debug("analyze liveness of {s}", .{decl.name}); - try liveness.analyze(module.gpa, &decl_arena.allocator, func.body); + var liveness = try Liveness.analyze(gpa, air); + defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { func.dump(module.*); } + + assert(decl.ty.hasCodeGenBits()); + + self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + continue; + }, + else => { + try module.failed_decls.ensureUnusedCapacity(gpa, 1); + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + continue; + }, + }; + continue; } assert(decl.ty.hasCodeGenBits()); @@ -2039,9 +2062,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor continue; }, else => { - try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - module.gpa, + gpa, decl.srcLoc(), "unable to codegen: {s}", .{@errorName(err)}, @@ -2070,7 +2093,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; const emit_h = module.emit_h.?; - _ = try emit_h.decl_table.getOrPut(module.gpa, decl); + _ = try emit_h.decl_table.getOrPut(gpa, decl); const decl_emit_h = decl.getEmitH(module); const fwd_decl = &decl_emit_h.fwd_decl; fwd_decl.shrinkRetainingCapacity(0); @@ -2079,7 +2102,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .module = module, .error_msg = null, .decl = decl, - .fwd_decl = fwd_decl.toManaged(module.gpa), + .fwd_decl = fwd_decl.toManaged(gpa), // we don't want to emit optionals and error unions to headers since they have no ABI .typedefs = undefined, }; @@ -2087,14 +2110,14 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor c_codegen.genHeader(&dg) catch |err| switch (err) { error.AnalysisFail => { - try emit_h.failed_decls.put(module.gpa, decl, dg.error_msg.?); + try emit_h.failed_decls.put(gpa, decl, dg.error_msg.?); continue; }, else => |e| return e, }; fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); - fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); + fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); }, }, .analyze_decl => |decl| { @@ -2111,9 +2134,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; self.bin_file.updateDeclLineNumber(module, decl) catch |err| { - try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - module.gpa, + gpa, decl.srcLoc(), "unable to update line number: {s}", .{@errorName(err)}, diff --git 
a/src/Liveness.zig b/src/Liveness.zig index 828614dcbb..84e2495054 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -150,6 +150,7 @@ fn analyzeInst( const gpa = a.gpa; const table = &a.table; const inst_tags = a.air.instructions.items(.tag); + const inst_datas = a.air.instructions.items(.data); // No tombstone for this instruction means it is never referenced, // and its birth marks its own death. Very metal 🤘 diff --git a/src/Module.zig b/src/Module.zig index 2f1dc0b33b..6273243ee2 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -739,8 +739,6 @@ pub const Union = struct { pub const Fn = struct { /// The Decl that corresponds to the function itself. owner_decl: *Decl, - /// undefined unless analysis state is `success`. - body: ir.Body, /// The ZIR instruction that is a function instruction. Use this to find /// the body. We store this rather than the body directly so that when ZIR /// is regenerated on update(), we can map this to the new corresponding @@ -3585,17 +3583,19 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { mod.gpa.free(kv.value); } -pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { +pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { const tracy = trace(@src()); defer tracy.end(); + const gpa = mod.gpa; + // Use the Decl's arena for function memory. - var arena = decl.value_arena.?.promote(mod.gpa); + var arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try mod.gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); - defer mod.gpa.free(param_inst_list); + const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); + defer gpa.free(param_inst_list); for (param_inst_list) |*param_inst, param_index| { const param_type = fn_ty.fnParamType(param_index); @@ -3615,7 +3615,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { var sema: Sema = .{ .mod = mod, - .gpa = mod.gpa, + .gpa = gpa, .arena = &arena.allocator, .code = zir, .owner_decl = decl, @@ -3626,6 +3626,11 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { }; defer sema.deinit(); + // First few indexes of extra are reserved and set at the end. + const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; + try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); + sema.air_extra.items.len += reserved_count; + var inner_block: Scope.Block = .{ .parent = null, .sema = &sema, @@ -3634,20 +3639,29 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { .inlining = null, .is_comptime = false, }; - defer inner_block.instructions.deinit(mod.gpa); + defer inner_block.instructions.deinit(gpa); // AIR currently requires the arg parameters to be the first N instructions - try inner_block.instructions.appendSlice(mod.gpa, param_inst_list); + try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); try sema.analyzeFnBody(&inner_block, func.zir_body_inst); - const instructions = try arena.allocator.dupe(*ir.Inst, inner_block.instructions.items); + // Copy the block into place and mark that as the main block. 
+ sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = sema.air_extra.items.len; + try sema.air_extra.appendSlice(inner_block.instructions.items); + func.state = .success; - func.body = .{ .instructions = instructions }; log.debug("set {s} to success", .{decl.name}); + + return Air{ + .instructions = sema.air_instructions.toOwnedSlice(), + .extra = sema.air_extra.toOwnedSlice(), + .values = sema.air_values.toOwnedSlice(), + .variables = sema.air_variables.toOwnedSlice(), + }; } fn markOutdatedDecl(mod: *Module, decl: *Decl) !void { diff --git a/src/Sema.zig b/src/Sema.zig index 85cb4aa423..b4e10837af 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1,6 +1,6 @@ //! Semantic analysis of ZIR instructions. //! Shared to every Block. Stored on the stack. -//! State used for compiling a `Zir` into AIR. +//! State used for compiling a ZIR into AIR. //! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions. //! Does type checking, comptime control flow, and safety-check generation. //! This is the the heart of the Zig compiler. @@ -11,6 +11,10 @@ gpa: *Allocator, /// Points to the arena allocator of the Decl. arena: *Allocator, code: Zir, +air_instructions: std.MultiArrayList(Air.Inst) = .{}, +air_extra: ArrayListUnmanaged(u32) = .{}, +air_values: ArrayListUnmanaged(Value) = .{}, +air_variables: ArrayListUnmanaged(Module.Var) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -32,7 +36,7 @@ func: ?*Module.Fn, /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function, /// > otherwise it is the number of parameters of the function. /// > param_count: u32 -param_inst_list: []const *ir.Inst, +param_inst_list: []const Air.Inst.Index, branch_quota: u32 = 1000, branch_count: u32 = 0, /// This field is updated when a new source location becomes active, so that @@ -65,10 +69,15 @@ const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); const target_util = @import("target.zig"); -pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, *ir.Inst); +pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index); pub fn deinit(sema: *Sema) void { - sema.inst_map.deinit(sema.gpa); + const gpa = sema.gpa; + sema.air_instructions.deinit(gpa); + sema.air_extra.deinit(gpa); + sema.air_values.deinit(gpa); + sema.air_variables.deinit(gpa); + sema.inst_map.deinit(gpa); sema.* = undefined; } @@ -108,7 +117,7 @@ pub fn analyzeFnBody( /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. 
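
Throughout the rest of this file, `*ir.Inst` results give way to `Air.Inst.Index` values that index into `sema.air_instructions`. As a rough illustration of what handing out indexes instead of pointers looks like, here is a hypothetical helper that is not taken from this diff; only the plain `std.MultiArrayList` `len`/`append` API is assumed:

    fn addInstSketch(sema: *Sema, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
        // The index of the new instruction is simply its position in the list.
        const index = @intCast(Air.Inst.Index, sema.air_instructions.len);
        try sema.air_instructions.append(sema.gpa, inst);
        return index;
    }
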
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!*Inst { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -533,7 +542,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -569,7 +578,7 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } /// TODO when we rework AIR memory layout, this function will no longer have a possible error. -pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!*ir.Inst { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. @@ -618,19 +627,19 @@ pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Z return sema.resolveAirAsType(block, src, air_inst); } -fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: *ir.Inst) !Type { +fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type { const wanted_type = Type.initTag(.@"type"); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toType(sema.arena); } -fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !Value { +fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !Value { return (try sema.resolveDefinedValue(block, src, base)) orelse return sema.failWithNeededComptime(block, src); } -fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !?Value { +fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value { if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); @@ -644,7 +653,7 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - base: *ir.Inst, + base: Air.Inst.Index, ) !?Value { if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| { return opv; @@ -708,13 +717,13 @@ pub fn resolveInstConst( }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -749,7 +758,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) 
InnerError!Air.Inst.Index { const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -820,7 +829,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1017,7 +1026,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1081,7 +1090,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1101,7 +1110,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1141,7 +1150,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1153,7 +1162,7 @@ fn zirRetPtr( return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1166,7 +1175,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1191,7 +1200,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I fn ensureResultUsed( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, src: LazySrcLoc, ) InnerError!void { switch (operand.ty.zigTypeTag()) { @@ -1213,7 +1222,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1247,7 +1256,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1269,13 +1278,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1298,13 +1307,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1317,7 +1326,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!* return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1336,7 +1345,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1589,7 +1598,7 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1625,7 +1634,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, src, param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1653,7 +1662,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1662,7 +1671,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1680,7 +1689,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
}); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1693,7 +1702,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!* }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1722,7 +1731,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1772,7 +1781,7 @@ fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Z return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1832,12 +1841,12 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE // Loop repetition is implied so the last instruction may or may not be a noreturn instruction. try child_block.instructions.append(sema.gpa, &loop_inst.base); - loop_inst.body = .{ .instructions = try sema.arena.dupe(*Inst, loop_block.instructions.items) }; + loop_inst.body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, loop_block.instructions.items) }; return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1847,13 +1856,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1911,7 +1920,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1922,7 +1931,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = 
trace(@src()); defer tracy.end(); @@ -1933,7 +1942,7 @@ fn analyzeBlockBody( if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. - const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items); + const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items); try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); return copied_instructions[copied_instructions.len - 1]; } @@ -1944,7 +1953,7 @@ fn analyzeBlockBody( if (br_block == merges.block_inst) { // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. - const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items[0..last_inst_index]); + const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]); try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); return merges.results.items[0]; } @@ -1959,7 +1968,7 @@ fn analyzeBlockBody( const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items); merges.block_inst.base.ty = resolved_ty; merges.block_inst.body = .{ - .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), }; // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. @@ -1991,7 +2000,7 @@ fn analyzeBlockBody( }, .block = merges.block_inst, .body = .{ - .instructions = try sema.arena.dupe(*Inst, coerce_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, coerce_block.instructions.items), }, }; } @@ -2130,7 +2139,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2138,7 +2147,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2192,7 +2201,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2204,7 +2213,7 @@ fn zirCall( const func = try sema.resolveInst(extra.data.callee); // TODO handle function calls of generic functions - const resolved_args = try sema.arena.alloc(*Inst, args.len); + const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len); for (args) |zir_arg, i| { // the args are already casted to the result of a param type instruction. 
resolved_args[i] = try sema.resolveInst(zir_arg); @@ -2216,13 +2225,13 @@ fn zirCall( fn analyzeCall( sema: *Sema, block: *Scope.Block, - func: *ir.Inst, + func: Air.Inst.Index, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - args: []const *ir.Inst, -) InnerError!*ir.Inst { + args: []const Air.Inst.Index, +) InnerError!Air.Inst.Index { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2279,7 +2288,7 @@ fn analyzeCall( const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or func.ty.fnCallingConvention() == .Inline; - const result: *Inst = if (is_inline_call) res: { + const result: Air.Inst.Index = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, @@ -2377,7 +2386,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2389,7 +2398,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2401,7 +2410,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2409,7 +2418,7 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.constType(sema.arena, src, elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -2424,7 +2433,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2437,7 +2446,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2452,7 +2461,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2465,7 +2474,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2486,7 +2495,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.constType(sema.arena, src, err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2505,7 +2514,7 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2535,7 +2544,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, result_ty, .bitcast, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2568,7 +2577,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2658,7 +2667,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2672,7 +2681,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; @@ -2680,7 +2689,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); - const enum_tag: 
*Inst = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand.ty.unionHasTag()) { @@ -2754,7 +2763,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2815,7 +2824,7 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2858,7 +2867,7 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2896,7 +2905,7 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2930,7 +2939,7 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2969,7 +2978,7 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2995,7 +3004,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3042,7 +3051,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3093,7 +3102,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3234,7 +3243,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3242,7 +3251,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Ins return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3258,13 +3267,13 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: 
Zir.Inst.Ref, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const dest_type = try sema.resolveType(block, src, zir_dest_type); const operand = try sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3281,7 +3290,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, ty, .ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3299,7 +3308,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3312,7 +3321,7 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3327,7 +3336,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3340,7 +3349,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3383,7 +3392,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3396,7 +3405,7 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.bitcast(block, dest_type, operand); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3439,7 +3448,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten 
float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3454,7 +3463,7 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3472,7 +3481,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3482,7 +3491,7 @@ fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3495,7 +3504,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3508,7 +3517,7 @@ fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3522,7 +3531,7 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3544,7 +3553,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3563,7 +3572,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3582,7 +3591,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3615,7 +3624,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = 
trace(@src()); defer tracy.end(); @@ -3645,14 +3654,14 @@ fn zirSwitchBlockMulti( fn analyzeSwitch( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, extra_end: usize, special_prong: Zir.SpecialProng, scalar_cases_len: usize, multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const gpa = sema.gpa; const mod = sema.mod; @@ -4187,7 +4196,7 @@ fn analyzeSwitch( cases[scalar_i] = .{ .item = item_val, - .body = .{ .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items) }, + .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) }, }; } @@ -4207,7 +4216,7 @@ fn analyzeSwitch( case_block.instructions.shrinkRetainingCapacity(0); - var any_ok: ?*Inst = null; + var any_ok: ?Air.Inst.Index = null; const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { @@ -4280,7 +4289,7 @@ fn analyzeSwitch( try case_block.instructions.append(gpa, &new_condbr.base); const cond_body: Body = .{ - .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), }; case_block.instructions.shrinkRetainingCapacity(0); @@ -4288,7 +4297,7 @@ fn analyzeSwitch( extra_index += body_len; _ = try sema.analyzeBody(&case_block, body); new_condbr.then_body = .{ - .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), }; if (prev_condbr) |condbr| { condbr.else_body = cond_body; @@ -4303,7 +4312,7 @@ fn analyzeSwitch( case_block.instructions.shrinkRetainingCapacity(0); _ = try sema.analyzeBody(&case_block, special.body); const else_body: Body = .{ - .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), }; if (prev_condbr) |condbr| { condbr.else_body = else_body; @@ -4507,7 +4516,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4516,7 +4525,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -4541,7 +4550,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return mod.constBool(arena, src, false); } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4566,13 +4575,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return mod.constType(sema.arena, src, file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4581,7 +4590,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4594,7 +4603,7 @@ fn zirBitwise( block: *Scope.Block, inst: Zir.Inst.Index, ir_tag: ir.Inst.Tag, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4606,7 +4615,7 @@ fn zirBitwise( const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); - const instructions = &[_]*Inst{ lhs, rhs }; + const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -4652,7 +4661,7 @@ fn zirBitwise( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4660,7 +4669,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4668,7 +4677,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4681,7 +4690,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4695,7 +4704,7 @@ fn zirNegate( return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4715,7 +4724,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4729,13 +4738,13 @@ fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, zir_tag: Zir.Inst.Tag, - lhs: *Inst, - rhs: *Inst, + lhs: Air.Inst.Index, + rhs: Air.Inst.Index, src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!*Inst { - const instructions = &[_]*Inst{ lhs, rhs }; +) InnerError!Air.Inst.Index { + const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -4844,7 +4853,7 @@ fn analyzeArithmetic( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4859,7 +4868,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4899,7 +4908,7 @@ fn zirAsm( }; }; - const args = try sema.arena.alloc(*Inst, inputs_len); + const args = try sema.arena.alloc(Air.Inst.Index, inputs_len); const inputs = try sema.arena.alloc([]const u8, inputs_len); for (args) |*arg, arg_i| { @@ -4943,7 +4952,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5009,7 +5018,7 @@ fn zirCmp( return mod.constBool(sema.arena, src, lhs.value().?.eql(rhs.value().?) 
== (op == .eq)); } - const instructions = &[_]*Inst{ lhs, rhs }; + const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); if (!resolved_type.isSelfComparable(is_equality_cmp)) { return mod.fail(&block.base, src, "operator not allowed for type '{}'", .{resolved_type}); @@ -5041,7 +5050,7 @@ fn zirCmp( return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5051,7 +5060,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5065,7 +5074,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5074,7 +5083,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{}); } @@ -5083,12 +5092,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5131,7 +5140,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; @@ -5140,7 +5149,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.constType(sema.arena, src, operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -5149,13 +5158,13 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5165,7 +5174,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5173,7 +5182,7 @@ fn zirTypeofPeer( const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; const args = sema.code.refSlice(extra.end, extended.small); - const inst_list = try sema.gpa.alloc(*ir.Inst, args.len); + const inst_list = try sema.gpa.alloc(Air.Inst.Index, args.len); defer sema.gpa.free(inst_list); for (args) |arg_ref, i| { @@ -5184,7 +5193,7 @@ fn zirTypeofPeer( return sema.mod.constType(sema.arena, src, result_type); } -fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5206,7 +5215,7 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, comptime is_bool_or: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5237,7 +5246,7 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5292,12 +5301,12 @@ fn zirBoolBr( const rhs_result = try sema.resolveBody(rhs_block, body); _ = try rhs_block.addBr(src, block_inst, rhs_result); - const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, then_block.instructions.items) }; - const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, else_block.instructions.items) }; + const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, then_block.instructions.items) }; + const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, else_block.instructions.items) }; _ = try child_block.addCondBr(src, lhs, air_then_body, air_else_body); block_inst.body = .{ - .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), }; try parent_block.instructions.append(sema.gpa, 
&block_inst.base); return &block_inst.base; @@ -5307,7 +5316,7 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5321,7 +5330,7 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5332,7 +5341,7 @@ fn zirIsNonNullPtr( return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5341,7 +5350,7 @@ fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5385,14 +5394,14 @@ fn zirCondbr( _ = try sema.analyzeBody(&sub_block, then_body); const air_then_body: ir.Body = .{ - .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), }; sub_block.instructions.shrinkRetainingCapacity(0); _ = try sema.analyzeBody(&sub_block, else_body); const air_else_body: ir.Body = .{ - .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), }; _ = try parent_block.addCondBr(src, cond, air_then_body, air_else_body); @@ -5470,7 +5479,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError fn analyzeRet( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, src: LazySrcLoc, need_coercion: bool, ) InnerError!Zir.Inst.Index { @@ -5505,7 +5514,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5526,7 +5535,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.mod.constType(sema.arena, .unneeded, ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5580,7 +5589,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5594,13 +5603,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const 
inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5622,7 +5631,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: mem.set(Zir.Inst.Index, found_fields, 0); // The init values to use for the struct instance. - const field_inits = try gpa.alloc(*ir.Inst, struct_obj.fields.count()); + const field_inits = try gpa.alloc(Air.Inst.Index, struct_obj.fields.count()); defer gpa.free(field_inits); var field_i: u32 = 0; @@ -5713,7 +5722,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5721,7 +5730,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5729,7 +5738,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5737,13 +5746,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5765,7 +5774,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: 
LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5774,7 +5783,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5783,84 +5792,84 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = 
sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5923,199 +5932,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, type_res, .bitcast, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn 
zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return 
sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); } -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6126,7 +6135,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const inst_data = 
sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6138,7 +6147,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6204,7 +6213,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -6271,7 +6280,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6281,7 +6290,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6291,7 +6300,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6301,7 +6310,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{}); @@ -6311,7 +6320,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6321,7 +6330,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6355,7 +6364,7 @@ pub const PanicId = enum { invalid_error_code, }; -fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void { +fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void { const block_inst = try sema.arena.create(Inst.Block); block_inst.* = .{ .base = .{ @@ -6364,12 +6373,12 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: .src = ok.src, }, .body = .{ - .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the condbr. 
+ .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the condbr. }, }; const ok_body: ir.Body = .{ - .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the br_void. + .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the br_void. }; const br_void = try sema.arena.create(Inst.BrVoid); br_void.* = .{ @@ -6395,7 +6404,7 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: _ = try sema.safetyPanic(&fail_block, ok.src, panic_id); - const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, fail_block.instructions.items) }; + const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, fail_block.instructions.items) }; const condbr = try sema.arena.create(Inst.CondBr); condbr.* = .{ @@ -6417,7 +6426,7 @@ fn panicWithMsg( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - msg_inst: *ir.Inst, + msg_inst: Air.Inst.Index, ) !Zir.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6438,7 +6447,7 @@ fn panicWithMsg( .ty = try mod.optionalType(arena, ptr_stack_trace_ty), .val = Value.initTag(.null_value), }); - const args = try arena.create([2]*ir.Inst); + const args = try arena.create([2]Air.Inst.Index); args.* = .{ msg_inst, null_stack_trace }; _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args); return always_noreturn; @@ -6494,10 +6503,10 @@ fn namedFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - object_ptr: *Inst, + object_ptr: Air.Inst.Index, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6647,7 +6656,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, decl_name: []const u8, -) InnerError!?*Inst { +) InnerError!?Air.Inst.Index { const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6671,11 +6680,11 @@ fn analyzeStructFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - struct_ptr: *Inst, + struct_ptr: Air.Inst.Index, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6706,11 +6715,11 @@ fn analyzeUnionFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - union_ptr: *Inst, + union_ptr: Air.Inst.Index, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6743,10 +6752,10 @@ fn elemPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - elem_index: *Inst, + array_ptr: Air.Inst.Index, + elem_index: Air.Inst.Index, elem_index_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6770,10 +6779,10 @@ fn elemPtrArray( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - elem_index: *Inst, + array_ptr: Air.Inst.Index, + elem_index: Air.Inst.Index, elem_index_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { if (array_ptr.value()) |array_ptr_val| { if 
(elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. @@ -6798,9 +6807,9 @@ fn coerce( sema: *Sema, block: *Scope.Block, dest_type: Type, - inst: *Inst, + inst: Air.Inst.Index, inst_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { if (dest_type.tag() == .var_args_param) { return sema.coerceVarArgParam(block, inst); } @@ -6976,7 +6985,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) InnerError!?*Inst { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7014,7 +7023,7 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) Inn return null; } -fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: *Inst) !*Inst { +fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index { switch (inst.ty.zigTypeTag()) { .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}), else => {}, @@ -7027,8 +7036,8 @@ fn storePtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: *Inst, - uncasted_value: *Inst, + ptr: Air.Inst.Index, + uncasted_value: Air.Inst.Index, ) !void { if (ptr.ty.isConstPtr()) return sema.mod.fail(&block.base, src, "cannot assign to constant", .{}); @@ -7076,7 +7085,7 @@ fn storePtr( _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); } -fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // Keep the comptime Value representation; take the new type. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7086,7 +7095,7 @@ fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Ins return block.addUnOp(inst.src, dest_type, .bitcast, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7094,7 +7103,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
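
Aside, not part of the patch: the hunks above convert Sema's coercion and store helpers (coerce, coerceNum, coerceVarArgParam, storePtr, bitcast, coerceArrayPtrToSlice/Many) so that operands and results are Air.Inst.Index values instead of *Inst pointers. The toy program below is a minimal, self-contained sketch of one property such index-based references have when instructions live in a single growable array; the names ToyInst and ToyIndex are invented for illustration only and are not the compiler's types, and the syntax targets the Zig std of this era.

const std = @import("std");

/// Illustrative stand-in for an instruction record; not the compiler's type.
const ToyInst = struct {
    tag: u8,
    operand: u32,
};

/// References are plain indexes into one flat array, mirroring the
/// Air.Inst.Index style adopted by the hunks above.
const ToyIndex = u32;

test "index references stay valid when the list grows" {
    var list = std.ArrayList(ToyInst).init(std.testing.allocator);
    defer list.deinit();

    try list.append(.{ .tag = 0, .operand = 0 });
    const ref: ToyIndex = 0; // index of the instruction appended above

    // Growing the list may move its backing memory, which would invalidate
    // a *ToyInst, but the index still names the same logical element.
    try list.append(.{ .tag = 1, .operand = ref });

    try std.testing.expectEqual(@as(u8, 0), list.items[ref].tag);
}
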
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7102,12 +7111,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7128,7 +7137,7 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl }); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!*Inst { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index { const variable = tv.val.castTag(.variable).?.data; const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); @@ -7157,8 +7166,8 @@ fn analyzeRef( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, -) InnerError!*Inst { + operand: Air.Inst.Index, +) InnerError!Air.Inst.Index { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7176,9 +7185,9 @@ fn analyzeLoad( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: *Inst, + ptr: Air.Inst.Index, ptr_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const elem_ty = switch (ptr.ty.zigTypeTag()) { .Pointer => ptr.ty.elemType(), else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}), @@ -7201,9 +7210,9 @@ fn analyzeIsNull( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, + operand: Air.Inst.Index, invert_logic: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7222,8 +7231,8 @@ fn analyzeIsNonErr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, -) InnerError!*Inst { + operand: Air.Inst.Index, +) InnerError!Air.Inst.Index { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); @@ -7243,12 +7252,12 @@ fn analyzeSlice( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - start: *Inst, - end_opt: ?*Inst, - sentinel_opt: ?*Inst, + array_ptr: Air.Inst.Index, + start: Air.Inst.Index, + end_opt: ?Air.Inst.Index, + sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7319,10 +7328,10 @@ fn cmpNumeric( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - lhs: *Inst, - rhs: 
*Inst, + lhs: Air.Inst.Index, + rhs: Air.Inst.Index, op: std.math.CompareOperator, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { assert(lhs.ty.isNumeric()); assert(rhs.ty.isNumeric()); @@ -7488,7 +7497,7 @@ fn cmpNumeric( return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } -fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); } @@ -7497,7 +7506,7 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); } -fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { const err_union = dest_type.castTag(.error_union).?; if (inst.value()) |val| { if (inst.ty.zigTypeTag() != .ErrorSet) { @@ -7568,7 +7577,7 @@ fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst } } -fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []*Inst) !Type { +fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []Air.Inst.Index) !Type { if (instructions.len == 0) return Type.initTag(.noreturn); @@ -7704,7 +7713,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!*ir.Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 60e9a96275..4a9087d7f5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -18,14 +18,14 @@ pub const Word = u32; pub const ResultId = u32; pub const TypeMap = std.HashMap(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage); -pub const InstMap = std.AutoHashMap(*Inst, ResultId); +pub const InstMap = std.AutoHashMap(Air.Inst.Index, ResultId); const IncomingBlock = struct { src_label_id: ResultId, break_value_id: ResultId, }; -pub const BlockMap = std.AutoHashMap(*Inst.Block, struct { +pub const BlockMap = std.AutoHashMap(Air.Inst.Index, struct { label_id: ResultId, incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock), }); @@ -279,16 +279,17 @@ pub const DeclGen = struct { return self.spv.module.getTarget(); } - fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error { + fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(self.decl); self.error_msg = try Module.ErrorMsg.create(self.spv.module.gpa, src_loc, format, args); return error.AnalysisFail; } - fn resolve(self: *DeclGen, inst: *Inst) !ResultId { + fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId { if (inst.value()) |val| { - return self.genConstant(inst.src, inst.ty, val); + return self.genConstant(inst.ty, val); } return self.inst_results.get(inst).?; // Instruction does not dominate all uses! @@ -313,7 +314,7 @@ pub const DeclGen = struct { const target = self.getTarget(); // The backend will never be asked to compiler a 0-bit integer, so we won't have to handle those in this function. 
- std.debug.assert(bits != 0); + assert(bits != 0); // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively. // 32-bit integers are always supported (see spec, 2.16.1, Data rules). @@ -387,19 +388,19 @@ pub const DeclGen = struct { .composite_integer }; }, // As of yet, there is no vector support in the self-hosted compiler. - .Vector => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}), + .Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}), // TODO: For which types is this the case? - else => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}), + else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}), }; } /// Generate a constant representing `val`. /// TODO: Deduplication? - fn genConstant(self: *DeclGen, src: LazySrcLoc, ty: Type, val: Value) Error!ResultId { + fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!ResultId { const target = self.getTarget(); const code = &self.spv.binary.types_globals_constants; const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(src, ty); + const result_type_id = try self.genType(ty); if (val.isUndef()) { try writeInstruction(code, .OpUndef, &[_]Word{ result_type_id, result_id }); @@ -411,13 +412,13 @@ pub const DeclGen = struct { const int_info = ty.intInfo(target); const backing_bits = self.backingIntBits(int_info.bits) orelse { // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. - return self.fail(src, "TODO: SPIR-V backend: implement composite int constants for {}", .{ty}); + return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty}); }; // We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any // SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this // might need to be updated. - std.debug.assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64)); + assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64)); var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt(); // Mask the low bits which make up the actual integer. This is to make sure that negative values @@ -469,13 +470,13 @@ pub const DeclGen = struct { } }, .Void => unreachable, - else => return self.fail(src, "TODO: SPIR-V backend: constant generation of type {}", .{ty}), + else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}), } return result_id; } - fn genType(self: *DeclGen, src: LazySrcLoc, ty: Type) Error!ResultId { + fn genType(self: *DeclGen, ty: Type) Error!ResultId { // We can't use getOrPut here so we can recursively generate types. if (self.spv.types.get(ty)) |already_generated| { return already_generated; @@ -492,7 +493,7 @@ pub const DeclGen = struct { const int_info = ty.intInfo(target); const backing_bits = self.backingIntBits(int_info.bits) orelse { // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. - return self.fail(src, "TODO: SPIR-V backend: implement composite int {}", .{ty}); + return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty}); }; // TODO: If backing_bits != int_info.bits, a duplicate type might be generated here. 
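
Aside, not part of the patch: in the spirv.zig hunks above, fail(), genConstant() and genType() stop taking a src: LazySrcLoc argument, and fail() now supplies a default .{ .node_offset = 0 } location itself, so call sites shrink to self.fail("...", .{}). Below is a minimal stand-alone sketch of that shape under an invented name (ToyGen); it is not the backend's real error type or Module.ErrorMsg machinery, and it assumes the era's *std.mem.Allocator convention.

const std = @import("std");

/// Illustrative only; mimics the shape of the updated fail(), not its
/// real implementation or types.
const ToyGen = struct {
    gpa: *std.mem.Allocator,
    error_msg: ?[]u8 = null,

    const Error = error{ AnalysisFail, OutOfMemory };

    /// Call sites no longer thread a source location through; the helper
    /// falls back to a default location (recorded here as a text prefix).
    fn fail(self: *ToyGen, comptime format: []const u8, args: anytype) Error {
        @setCold(true);
        self.error_msg = try std.fmt.allocPrint(self.gpa, "node_offset 0: " ++ format, args);
        return error.AnalysisFail;
    }
};

A call site in this style reads simply return self.fail("TODO: ...", .{});, matching the updated calls in the hunks above.
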
@@ -518,7 +519,7 @@ pub const DeclGen = struct { }; if (!supported) { - return self.fail(src, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits}); + return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits}); } try writeInstruction(code, .OpTypeFloat, &[_]Word{ result_id, bits }); @@ -526,19 +527,19 @@ pub const DeclGen = struct { .Fn => { // We only support zig-calling-convention functions, no varargs. if (ty.fnCallingConvention() != .Unspecified) - return self.fail(src, "Unsupported calling convention for SPIR-V", .{}); + return self.fail("Unsupported calling convention for SPIR-V", .{}); if (ty.fnIsVarArgs()) - return self.fail(src, "VarArgs unsupported for SPIR-V", .{}); + return self.fail("VarArgs unsupported for SPIR-V", .{}); // In order to avoid a temporary here, first generate all the required types and then simply look them up // when generating the function type. const params = ty.fnParamLen(); var i: usize = 0; while (i < params) : (i += 1) { - _ = try self.genType(src, ty.fnParamType(i)); + _ = try self.genType(ty.fnParamType(i)); } - const return_type_id = try self.genType(src, ty.fnReturnType()); + const return_type_id = try self.genType(ty.fnReturnType()); // result id + result type id + parameter type ids. try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen())); @@ -551,7 +552,7 @@ pub const DeclGen = struct { } }, // When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType. - .Pointer => return self.fail(src, "Cannot create pointer with unkown storage class", .{}), + .Pointer => return self.fail("Cannot create pointer with unkown storage class", .{}), .Vector => { // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations // which work on them), so simply use those. @@ -561,7 +562,7 @@ pub const DeclGen = struct { // is adequate at all for this. // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems. - return self.fail(src, "TODO: SPIR-V backend: implement type Vector", .{}); + return self.fail("TODO: SPIR-V backend: implement type Vector", .{}); }, .Null, .Undefined, @@ -573,7 +574,7 @@ pub const DeclGen = struct { .BoundFn => unreachable, // this type will be deleted from the language. - else => |tag| return self.fail(src, "TODO: SPIR-V backend: implement type {}s", .{tag}), + else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}), } try self.spv.types.putNoClobber(ty, result_id); @@ -582,8 +583,8 @@ pub const DeclGen = struct { /// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that. /// TODO: The result of this needs to be cached. - fn genPointerType(self: *DeclGen, src: LazySrcLoc, ty: Type, storage_class: spec.StorageClass) !ResultId { - std.debug.assert(ty.zigTypeTag() == .Pointer); + fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !ResultId { + assert(ty.zigTypeTag() == .Pointer); const code = &self.spv.binary.types_globals_constants; const result_id = self.spv.allocResultId(); @@ -591,7 +592,7 @@ pub const DeclGen = struct { // TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types // if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled. 
// These also relates to the pointer's address space. - const child_id = try self.genType(src, ty.elemType()); + const child_id = try self.genType(ty.elemType()); try writeInstruction(code, .OpTypePointer, &[_]Word{ result_id, @enumToInt(storage_class), child_id }); @@ -602,9 +603,9 @@ pub const DeclGen = struct { const decl = self.decl; const result_id = decl.fn_link.spirv.id; - if (decl.val.castTag(.function)) |func_payload| { - std.debug.assert(decl.ty.zigTypeTag() == .Fn); - const prototype_id = try self.genType(.{ .node_offset = 0 }, decl.ty); + if (decl.val.castTag(.function)) |_| { + assert(decl.ty.zigTypeTag() == .Fn); + const prototype_id = try self.genType(decl.ty); try writeInstruction(&self.spv.binary.fn_decls, .OpFunction, &[_]Word{ self.spv.types.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype. result_id, @@ -631,189 +632,167 @@ pub const DeclGen = struct { try writeInstruction(&self.spv.binary.fn_decls, .OpLabel, &[_]Word{root_block_id}); self.current_block_label_id = root_block_id; - try self.genBody(func_payload.data.body); + const main_body = self.air.getMainBody(); + try self.genBody(main_body); // Append the actual code into the fn_decls section. try self.spv.binary.fn_decls.appendSlice(self.code.items); try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionEnd, &[_]Word{}); } else { - return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()}); + return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()}); } } - fn genBody(self: *DeclGen, body: ir.Body) Error!void { - for (body.instructions) |inst| { + fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void { + for (body) |inst| { try self.genInst(inst); } } - fn genInst(self: *DeclGen, inst: *Inst) !void { - const result_id = switch (inst.tag) { - .add, .addwrap => try self.genBinOp(inst.castTag(.add).?), - .sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?), - .mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?), - .div => try self.genBinOp(inst.castTag(.div).?), - .bit_and => try self.genBinOp(inst.castTag(.bit_and).?), - .bit_or => try self.genBinOp(inst.castTag(.bit_or).?), - .xor => try self.genBinOp(inst.castTag(.xor).?), - .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?), - .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?), - .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?), - .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?), - .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?), - .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?), - .bool_and => try self.genBinOp(inst.castTag(.bool_and).?), - .bool_or => try self.genBinOp(inst.castTag(.bool_or).?), - .not => try self.genUnOp(inst.castTag(.not).?), - .alloc => try self.genAlloc(inst.castTag(.alloc).?), - .arg => self.genArg(), - .block => (try self.genBlock(inst.castTag(.block).?)) orelse return, - .br => return try self.genBr(inst.castTag(.br).?), - .br_void => return try self.genBrVoid(inst.castTag(.br_void).?), - // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them - // throughout the IR. 
+ fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { + const air_tags = self.air.instructions.items(.tag); + const result_id = switch (air_tags[inst]) { + // zig fmt: off + .add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), + .sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), + .mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), + .div => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), + + .bit_and => try self.genBinOpSimple(inst, .OpBitwiseAnd), + .bit_or => try self.genBinOpSimple(inst, .OpBitwiseOr), + .xor => try self.genBinOpSimple(inst, .OpBitwiseXor), + .bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd), + .bool_or => try self.genBinOpSimple(inst, .OpLogicalOr), + + .not => try self.genNot(inst), + + .cmp_eq => try self.genCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), + .cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), + .cmp_gt => try self.genCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), + .cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), + .cmp_lt => try self.genCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), + .cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), + + .arg => self.genArg(), + .alloc => try self.genAlloc(inst), + .block => (try self.genBlock(inst)) orelse return, + .load => try self.genLoad(inst), + + .br => return self.genBr(inst), .breakpoint => return, - .condbr => return try self.genCondBr(inst.castTag(.condbr).?), - .constant => unreachable, - .dbg_stmt => return try self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .load => try self.genLoad(inst.castTag(.load).?), - .loop => return try self.genLoop(inst.castTag(.loop).?), - .ret => return try self.genRet(inst.castTag(.ret).?), - .retvoid => return try self.genRetVoid(), - .store => return try self.genStore(inst.castTag(.store).?), - .unreach => return try self.genUnreach(), - else => return self.fail(inst.src, "TODO: SPIR-V backend: implement inst {s}", .{@tagName(inst.tag)}), + .condbr => return self.genCondBr(inst), + .constant => unreachable, + .dbg_stmt => return self.genDbgStmt(inst), + .loop => return self.genLoop(inst), + .ret => return self.genRet(inst), + .store => return self.genStore(inst), + .unreach => return self.genUnreach(), + // zig fmt: on }; try self.inst_results.putNoClobber(inst, result_id); } - fn genBinOp(self: *DeclGen, inst: *Inst.BinOp) !ResultId { - // TODO: Will lhs and rhs have the same type? - const lhs_id = try self.resolve(inst.lhs); - const rhs_id = try self.resolve(inst.rhs); + fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); + const result_id = self.spv.allocResultId(); + try writeInstruction(&self.code, opcode, &[_]Word{ + result_type_id, result_id, lhs_id, rhs_id, + }); + return result_id; + } + + fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + // LHS and RHS are guaranteed to have the same type, and AIR guarantees + // the result to be the same as the LHS and RHS, which matches SPIR-V. 
+ const ty = self.air.getType(inst); + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - // TODO: Is the result the same as the argument types? - // This is supposed to be the case for SPIR-V. - std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty)); - std.debug.assert(inst.base.ty.tag() == .bool or inst.base.ty.eql(inst.lhs.ty)); - - // Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float - // versions of operations require different opcodes. - // For operations which produce bools, the information of inst.base.ty is not useful, so just pick either operand - // instead. - const info = try self.arithmeticTypeInfo(inst.lhs.ty); - - if (info.class == .composite_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{}); - } else if (info.class == .strange_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for strange integers", .{}); - } + const result_type_id = try self.genType(ty); + + assert(self.air.getType(bin_op.lhs).eql(ty)); + assert(self.air.getType(bin_op.rhs).eql(ty)); - const is_float = info.class == .float; - const is_signed = info.signedness == .signed; - // **Note**: All these operations must be valid for vectors as well! - const opcode = switch (inst.base.tag) { - // The regular integer operations are all defined for wrapping. Since theyre only relevant for integers, - // we can just switch on both cases here. - .add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd, - .sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub, - .mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul, - // TODO: Trap if divisor is 0? - // TODO: Figure out of OpSDiv for unsigned/OpUDiv for signed does anything useful. - // => Those are probably for divTrunc and divFloor, though the compiler does not yet generate those. - // => TODO: Figure out how those work on the SPIR-V side. - // => TODO: Test these. - .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv, - // Only integer versions for these. - .bit_and => Opcode.OpBitwiseAnd, - .bit_or => Opcode.OpBitwiseOr, - .xor => Opcode.OpBitwiseXor, - // Bool -> bool operations. - .bool_and => Opcode.OpLogicalAnd, - .bool_or => Opcode.OpLogicalOr, + // Binary operations are generally applicable to both scalar and vector operations + // in SPIR-V, but int and float versions of operations require different opcodes. + const info = try self.arithmeticTypeInfo(ty); + + const opcode_index: usize = switch (info.class) { + .composite_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{}); + }, + .strange_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{}); + }, + .integer => switch (info.signedness) { + .signed => 1, + .unsigned => 2, + }, + .float => 0, else => unreachable, }; - + const opcode = ops[opcode_index]; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id }); // TODO: Trap on overflow? Probably going to be annoying. // TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap. 
- if (info.class != .strange_integer) - return result_id; - - return self.fail(inst.base.src, "TODO: SPIR-V backend: strange integer operation mask", .{}); + return result_id; } - fn genCmp(self: *DeclGen, inst: *Inst.BinOp) !ResultId { - const lhs_id = try self.resolve(inst.lhs); - const rhs_id = try self.resolve(inst.rhs); - + fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - // All of these operations should be 2 equal types -> bool - std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty)); - std.debug.assert(inst.base.ty.tag() == .bool); - - // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, but int and float - // versions of operations require different opcodes. - // Since inst.base.ty is always bool and so not very useful, and because both arguments must be the same, just get the info - // from either of the operands. - const info = try self.arithmeticTypeInfo(inst.lhs.ty); - - if (info.class == .composite_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{}); - } else if (info.class == .strange_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: comparison for strange integers", .{}); - } + const result_type_id = try self.genType(Type.initTag(.bool)); + const op_ty = self.air.getType(bin_op.lhs); + assert(op_ty.eql(self.air.getType(bin_op.rhs))); - const is_bool = info.class == .bool; - const is_float = info.class == .float; - const is_signed = info.signedness == .signed; - - // **Note**: All these operations must be valid for vectors as well! - // For floating points, we generally want ordered operations (which return false if either operand is nan). - const opcode = switch (inst.base.tag) { - .cmp_eq => if (is_float) Opcode.OpFOrdEqual else if (is_bool) Opcode.OpLogicalEqual else Opcode.OpIEqual, - .cmp_neq => if (is_float) Opcode.OpFOrdNotEqual else if (is_bool) Opcode.OpLogicalNotEqual else Opcode.OpINotEqual, - // TODO: Verify that these OpFOrd type operations produce the right value. - // TODO: Is there a more fundamental difference between OpU and OpS operations here than just the type? - .cmp_gt => if (is_float) Opcode.OpFOrdGreaterThan else if (is_signed) Opcode.OpSGreaterThan else Opcode.OpUGreaterThan, - .cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual, - .cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan, - .cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual, + // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, + // but int and float versions of operations require different opcodes. 
+ const info = try self.arithmeticTypeInfo(op_ty); + + const opcode_index: usize = switch (info.class) { + .composite_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{}); + }, + .strange_integer => { + return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{}); + }, + .float => 0, + .bool => 1, + .integer => switch (info.signedness) { + .signed => 1, + .unsigned => 2, + }, else => unreachable, }; + const opcode = ops[opcode_index]; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id }); return result_id; } - fn genUnOp(self: *DeclGen, inst: *Inst.UnOp) !ResultId { - const operand_id = try self.resolve(inst.operand); - + fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_id = try self.resolve(ty_op.operand); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - const opcode = switch (inst.base.tag) { - // Bool -> bool - .not => Opcode.OpLogicalNot, - else => unreachable, - }; - + const result_type_id = try self.genType(Type.initTag(.bool)); + const opcode: Opcode = .OpLogicalNot; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, operand_id }); - return result_id; } - fn genAlloc(self: *DeclGen, inst: *Inst.NoOp) !ResultId { + fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty = self.air.getType(inst); const storage_class = spec.StorageClass.Function; - const result_type_id = try self.genPointerType(inst.base.src, inst.base.ty, storage_class); + const result_type_id = try self.genPointerType(ty, storage_class); const result_id = self.spv.allocResultId(); // Rather than generating into code here, we're just going to generate directly into the fn_decls section so that @@ -828,7 +807,7 @@ pub const DeclGen = struct { return self.args.items[self.next_arg_index]; } - fn genBlock(self: *DeclGen, inst: *Inst.Block) !?ResultId { + fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up // the current block by first generating the code of the block, then a label, and then generate the rest of the current @@ -848,11 +827,16 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.spv.gpa); } - try self.genBody(inst.body); + const ty = self.air.getType(inst); + const inst_datas = self.air.instructions.items(.data); + const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; + + try self.genBody(body); try self.beginSPIRVBlock(label_id); // If this block didn't produce a value, simply return here. - if (!inst.base.ty.hasCodeGenBits()) + if (!ty.hasCodeGenBits()) return null; // Combine the result from the blocks using the Phi instruction. @@ -862,7 +846,7 @@ pub const DeclGen = struct { // TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types // are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws // an error for pointers. 
- const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(ty); _ = result_type_id; try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent... @@ -874,30 +858,26 @@ pub const DeclGen = struct { return result_id; } - fn genBr(self: *DeclGen, inst: *Inst.Br) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const target = self.blocks.get(inst.block).?; + fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void { + const br = self.air.instructions.items(.data)[inst].br; + const block = self.blocks.get(br.block_inst).?; + const operand_ty = self.air.getType(br.operand); - // TODO: For some reason, br is emitted with void parameters. - if (inst.operand.ty.hasCodeGenBits()) { - const operand_id = try self.resolve(inst.operand); + if (operand_ty.hasCodeGenBits()) { + const operand_id = try self.resolve(br.operand); // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body. - try target.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); + try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); } - try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id}); - } - - fn genBrVoid(self: *DeclGen, inst: *Inst.BrVoid) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const target = self.blocks.get(inst.block).?; - // Don't need to add this to the incoming block list, as there is no value to insert in the phi node anyway. - try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id}); + try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id}); } fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const condition_id = try self.resolve(inst.condition); + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const cond_br = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]; + const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len]; + const condition_id = try self.resolve(pl_op.operand); // These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block. 
const then_label_id = self.spv.allocResultId(); @@ -913,23 +893,26 @@ pub const DeclGen = struct { }); try self.beginSPIRVBlock(then_label_id); - try self.genBody(inst.then_body); + try self.genBody(then_body); try self.beginSPIRVBlock(else_label_id); - try self.genBody(inst.else_body); + try self.genBody(else_body); } - fn genDbgStmt(self: *DeclGen, inst: *Inst.DbgStmt) !void { + fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { + const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const src_fname_id = try self.spv.resolveSourceFileName(self.decl); - try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, inst.line, inst.column }); + try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column }); } - fn genLoad(self: *DeclGen, inst: *Inst.UnOp) !ResultId { - const operand_id = try self.resolve(inst.operand); + fn genLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_id = try self.resolve(ty_op.operand); + const ty = self.air.getType(inst); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(ty); const result_id = self.spv.allocResultId(); - const operands = if (inst.base.ty.isVolatilePtr()) + const operands = if (ty.isVolatilePtr()) &[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } else &[_]Word{ result_type_id, result_id, operand_id }; @@ -939,8 +922,9 @@ pub const DeclGen = struct { return result_id; } - fn genLoop(self: *DeclGen, inst: *Inst.Loop) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? + fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void { + const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_label_id = self.spv.allocResultId(); // Jump to the loop entry point @@ -949,27 +933,29 @@ pub const DeclGen = struct { // TODO: Look into OpLoopMerge. try self.beginSPIRVBlock(loop_label_id); - try self.genBody(inst.body); + try self.genBody(body); try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id}); } - fn genRet(self: *DeclGen, inst: *Inst.UnOp) !void { - const operand_id = try self.resolve(inst.operand); - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); - } - - fn genRetVoid(self: *DeclGen) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? 
- try writeInstruction(&self.code, .OpReturn, &[_]Word{}); + fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void { + const operand = inst_datas[inst].un_op; + const operand_ty = self.air.getType(operand); + if (operand_ty.hasCodeGenBits()) { + const operand_id = try self.resolve(operand); + try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); + } else { + try writeInstruction(&self.code, .OpReturn, &[_]Word{}); + } } - fn genStore(self: *DeclGen, inst: *Inst.BinOp) !void { - const dst_ptr_id = try self.resolve(inst.lhs); - const src_val_id = try self.resolve(inst.rhs); + fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const dst_ptr_id = try self.resolve(bin_op.lhs); + const src_val_id = try self.resolve(bin_op.rhs); + const lhs_ty = self.air.getType(bin_op.lhs); - const operands = if (inst.lhs.ty.isVolatilePtr()) + const operands = if (lhs_ty.isVolatilePtr()) &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } else &[_]Word{ dst_ptr_id, src_val_id }; @@ -978,7 +964,6 @@ pub const DeclGen = struct { } fn genUnreach(self: *DeclGen) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? try writeInstruction(&self.code, .OpUnreachable, &[_]Word{}); } }; -- cgit v1.2.3 From 0f38f686964664f68e013ec3c63cfe655001f165 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jul 2021 19:51:31 -0700 Subject: stage2: Air and Liveness are passed ephemerally to the link infrastructure, instead of being stored with Module.Fn. This moves towards a strategy to make more efficient use of memory by not storing Air or Liveness data in the Fn struct, but computing it on demand, immediately sending it to the backend, and then immediately freeing it. Backends which want to defer codegen until flush() such as SPIR-V must move the Air/Liveness data upon `updateFunc` being called and keep track of that data in the backend implementation itself. --- BRANCH_TODO | 5 + src/Compilation.zig | 2 +- src/Liveness.zig | 9 +- src/Module.zig | 5 - src/Sema.zig | 762 +++++++++++++++++++++++++------------------------- src/codegen.zig | 7 +- src/codegen/c.zig | 9 +- src/codegen/llvm.zig | 3 + src/codegen/spirv.zig | 3 +- src/codegen/wasm.zig | 88 +++--- src/link.zig | 34 ++- src/link/C.zig | 28 +- src/link/Coff.zig | 56 +++- src/link/Elf.zig | 558 +++++++++++++++++++----------------- src/link/MachO.zig | 55 ++++ src/link/Plan9.zig | 29 +- src/link/SpirV.zig | 24 +- src/link/Wasm.zig | 59 +++- 18 files changed, 1023 insertions(+), 713 deletions(-) (limited to 'src/Compilation.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index 585c8adf44..c7f3923559 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -690,3 +690,8 @@ pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { } } + /// For debugging purposes. 
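(Editorial sketch, not part of the patch above: a rough illustration of the ephemeral Air/Liveness hand-off the commit message describes. `updateFunc`, `Air`, `Liveness`, and `Module.Fn` are named in the message or the surrounding diffs; the backend type `DeferredBackend`, its `saved` map, and the `genFunc` helper are assumptions invented only for this example.)

    // A backend that defers codegen until flush(), such as SPIR-V, must copy
    // the Air/Liveness it is handed, because Compilation frees that data as
    // soon as updateFunc returns.
    const SavedFn = struct { air: Air, liveness: Liveness };

    fn updateFunc(self: *DeferredBackend, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
        _ = module;
        // Take ownership of the ephemeral data so it survives until flush().
        try self.saved.put(self.gpa, func, SavedFn{ .air = air, .liveness = liveness });
    }

    fn flush(self: *DeferredBackend) !void {
        var it = self.saved.iterator();
        while (it.next()) |entry| {
            // Run the deferred codegen now; freeing of the copied data is
            // omitted from this sketch.
            try self.genFunc(entry.key_ptr.*, entry.value_ptr.air, entry.value_ptr.liveness);
        }
        self.saved.clearRetainingCapacity();
    }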
+ pub fn dump(func: *Fn, mod: Module) void { + ir.dumpFn(mod, func); + } + diff --git a/src/Compilation.zig b/src/Compilation.zig index 74ad7b2aae..90224a77d1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2027,7 +2027,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { - func.dump(module.*); + @panic("TODO implement dumping AIR and liveness"); } assert(decl.ty.hasCodeGenBits()); diff --git a/src/Liveness.zig b/src/Liveness.zig index 0cbac61118..1402a5997b 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -50,7 +50,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { var a: Analysis = .{ .gpa = gpa, - .air = &air, + .air = air, .table = .{}, .tomb_bits = try gpa.alloc( usize, @@ -65,7 +65,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { defer a.table.deinit(gpa); const main_body = air.getMainBody(); - try a.table.ensureTotalCapacity(main_body.len); + try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len)); try analyzeWithContext(&a, null, main_body); return Liveness{ .tomb_bits = a.tomb_bits, @@ -108,9 +108,10 @@ const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. const Analysis = struct { gpa: *Allocator, - air: *const Air, + air: Air, table: std.AutoHashMapUnmanaged(Air.Inst.Index, void), tomb_bits: []usize, + special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { @@ -165,7 +166,7 @@ fn analyzeWithContext( fn analyzeInst( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, ) Allocator.Error!void { const gpa = a.gpa; diff --git a/src/Module.zig b/src/Module.zig index 8971a57487..5972c2bdcf 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -769,11 +769,6 @@ pub const Fn = struct { success, }; - /// For debugging purposes. 
- pub fn dump(func: *Fn, mod: Module) void { - ir.dumpFn(mod, func); - } - pub fn deinit(func: *Fn, gpa: *Allocator) void { if (func.getInferredErrorSet()) |map| { map.deinit(gpa); diff --git a/src/Sema.zig b/src/Sema.zig index d7ec01696f..54c42a482d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -69,7 +69,7 @@ const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); const target_util = @import("target.zig"); -pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index); +pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref); pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; @@ -158,344 +158,344 @@ pub fn analyzeBody( var i: usize = 0; while (true) { const inst = body[i]; - const air_inst = switch (tags[inst]) { + const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .arg => try sema.zirArg(block, inst), - .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), - .alloc_mut => try sema.zirAllocMut(block, inst), - .alloc_comptime => try sema.zirAllocComptime(block, inst), - .anyframe_type => try sema.zirAnyframeType(block, inst), - .array_cat => try sema.zirArrayCat(block, inst), - .array_mul => try sema.zirArrayMul(block, inst), - .array_type => try sema.zirArrayType(block, inst), - .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), - .vector_type => try sema.zirVectorType(block, inst), - .as => try sema.zirAs(block, inst), - .as_node => try sema.zirAsNode(block, inst), - .bit_and => try sema.zirBitwise(block, inst, .bit_and), - .bit_not => try sema.zirBitNot(block, inst), - .bit_or => try sema.zirBitwise(block, inst, .bit_or), - .bitcast => try sema.zirBitcast(block, inst), - .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), - .block => try sema.zirBlock(block, inst), - .suspend_block => try sema.zirSuspendBlock(block, inst), - .bool_not => try sema.zirBoolNot(block, inst), - .bool_and => try sema.zirBoolOp(block, inst, false), - .bool_or => try sema.zirBoolOp(block, inst, true), - .bool_br_and => try sema.zirBoolBr(block, inst, false), - .bool_br_or => try sema.zirBoolBr(block, inst, true), - .c_import => try sema.zirCImport(block, inst), - .call => try sema.zirCall(block, inst, .auto, false), - .call_chkused => try sema.zirCall(block, inst, .auto, true), - .call_compile_time => try sema.zirCall(block, inst, .compile_time, false), - .call_nosuspend => try sema.zirCall(block, inst, .no_async, false), - .call_async => try sema.zirCall(block, inst, .async_kw, false), - .cmp_eq => try sema.zirCmp(block, inst, .eq), - .cmp_gt => try sema.zirCmp(block, inst, .gt), - .cmp_gte => try sema.zirCmp(block, inst, .gte), - .cmp_lt => try sema.zirCmp(block, inst, .lt), - .cmp_lte => try sema.zirCmp(block, inst, .lte), - .cmp_neq => try sema.zirCmp(block, inst, .neq), - .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), - .decl_ref => try sema.zirDeclRef(block, inst), - .decl_val => try sema.zirDeclVal(block, inst), - .load => try sema.zirLoad(block, inst), - .elem_ptr => try sema.zirElemPtr(block, inst), - .elem_ptr_node => try sema.zirElemPtrNode(block, inst), - .elem_val => try sema.zirElemVal(block, inst), - .elem_val_node => try sema.zirElemValNode(block, inst), - .elem_type => try sema.zirElemType(block, inst), - 
.enum_literal => try sema.zirEnumLiteral(block, inst), - .enum_to_int => try sema.zirEnumToInt(block, inst), - .int_to_enum => try sema.zirIntToEnum(block, inst), - .err_union_code => try sema.zirErrUnionCode(block, inst), - .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), - .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), - .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), - .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), - .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), - .error_union_type => try sema.zirErrorUnionType(block, inst), - .error_value => try sema.zirErrorValue(block, inst), - .error_to_int => try sema.zirErrorToInt(block, inst), - .int_to_error => try sema.zirIntToError(block, inst), - .field_ptr => try sema.zirFieldPtr(block, inst), - .field_ptr_named => try sema.zirFieldPtrNamed(block, inst), - .field_val => try sema.zirFieldVal(block, inst), - .field_val_named => try sema.zirFieldValNamed(block, inst), - .func => try sema.zirFunc(block, inst, false), - .func_inferred => try sema.zirFunc(block, inst, true), - .import => try sema.zirImport(block, inst), - .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), - .int => try sema.zirInt(block, inst), - .int_big => try sema.zirIntBig(block, inst), - .float => try sema.zirFloat(block, inst), - .float128 => try sema.zirFloat128(block, inst), - .int_type => try sema.zirIntType(block, inst), - .is_non_err => try sema.zirIsNonErr(block, inst), - .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), - .is_non_null => try sema.zirIsNonNull(block, inst), - .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), - .loop => try sema.zirLoop(block, inst), - .merge_error_sets => try sema.zirMergeErrorSets(block, inst), - .negate => try sema.zirNegate(block, inst, .sub), - .negate_wrap => try sema.zirNegate(block, inst, .subwrap), - .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), - .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), - .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), - .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), - .optional_type => try sema.zirOptionalType(block, inst), - .param_type => try sema.zirParamType(block, inst), - .ptr_type => try sema.zirPtrType(block, inst), - .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), - .ref => try sema.zirRef(block, inst), - .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), - .shl => try sema.zirShl(block, inst), - .shr => try sema.zirShr(block, inst), - .slice_end => try sema.zirSliceEnd(block, inst), - .slice_sentinel => try sema.zirSliceSentinel(block, inst), - .slice_start => try sema.zirSliceStart(block, inst), - .str => try sema.zirStr(block, inst), - .switch_block => try sema.zirSwitchBlock(block, inst, false, .none), - .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), - .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), - .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), - .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), - .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), - .switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), - .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, 
.none), - .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), - .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), - .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), - .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), - .switch_capture => try sema.zirSwitchCapture(block, inst, false, false), - .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), - .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), - .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), - .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), - .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), - .type_info => try sema.zirTypeInfo(block, inst), - .size_of => try sema.zirSizeOf(block, inst), - .bit_size_of => try sema.zirBitSizeOf(block, inst), - .typeof => try sema.zirTypeof(block, inst), - .typeof_elem => try sema.zirTypeofElem(block, inst), - .log2_int_type => try sema.zirLog2IntType(block, inst), - .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), - .xor => try sema.zirBitwise(block, inst, .xor), - .struct_init_empty => try sema.zirStructInitEmpty(block, inst), - .struct_init => try sema.zirStructInit(block, inst, false), - .struct_init_ref => try sema.zirStructInit(block, inst, true), - .struct_init_anon => try sema.zirStructInitAnon(block, inst, false), - .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), - .array_init => try sema.zirArrayInit(block, inst, false), - .array_init_ref => try sema.zirArrayInit(block, inst, true), - .array_init_anon => try sema.zirArrayInitAnon(block, inst, false), - .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), - .union_init_ptr => try sema.zirUnionInitPtr(block, inst), - .field_type => try sema.zirFieldType(block, inst), - .field_type_ref => try sema.zirFieldTypeRef(block, inst), - .ptr_to_int => try sema.zirPtrToInt(block, inst), - .align_of => try sema.zirAlignOf(block, inst), - .bool_to_int => try sema.zirBoolToInt(block, inst), - .embed_file => try sema.zirEmbedFile(block, inst), - .error_name => try sema.zirErrorName(block, inst), - .tag_name => try sema.zirTagName(block, inst), - .reify => try sema.zirReify(block, inst), - .type_name => try sema.zirTypeName(block, inst), - .frame_type => try sema.zirFrameType(block, inst), - .frame_size => try sema.zirFrameSize(block, inst), - .float_to_int => try sema.zirFloatToInt(block, inst), - .int_to_float => try sema.zirIntToFloat(block, inst), - .int_to_ptr => try sema.zirIntToPtr(block, inst), - .float_cast => try sema.zirFloatCast(block, inst), - .int_cast => try sema.zirIntCast(block, inst), - .err_set_cast => try sema.zirErrSetCast(block, inst), - .ptr_cast => try sema.zirPtrCast(block, inst), - .truncate => try sema.zirTruncate(block, inst), - .align_cast => try sema.zirAlignCast(block, inst), - .has_decl => try sema.zirHasDecl(block, inst), - .has_field => try sema.zirHasField(block, inst), - .clz => try sema.zirClz(block, inst), - .ctz => try sema.zirCtz(block, inst), - .pop_count => try sema.zirPopCount(block, inst), - .byte_swap => try sema.zirByteSwap(block, inst), - .bit_reverse => try sema.zirBitReverse(block, inst), - .div_exact => try sema.zirDivExact(block, inst), - .div_floor => try sema.zirDivFloor(block, inst), - .div_trunc => try sema.zirDivTrunc(block, inst), - .mod => try sema.zirMod(block, inst), - 
.rem => try sema.zirRem(block, inst), - .shl_exact => try sema.zirShlExact(block, inst), - .shr_exact => try sema.zirShrExact(block, inst), - .bit_offset_of => try sema.zirBitOffsetOf(block, inst), - .offset_of => try sema.zirOffsetOf(block, inst), - .cmpxchg_strong => try sema.zirCmpxchg(block, inst), - .cmpxchg_weak => try sema.zirCmpxchg(block, inst), - .splat => try sema.zirSplat(block, inst), - .reduce => try sema.zirReduce(block, inst), - .shuffle => try sema.zirShuffle(block, inst), - .atomic_load => try sema.zirAtomicLoad(block, inst), - .atomic_rmw => try sema.zirAtomicRmw(block, inst), - .atomic_store => try sema.zirAtomicStore(block, inst), - .mul_add => try sema.zirMulAdd(block, inst), - .builtin_call => try sema.zirBuiltinCall(block, inst), - .field_ptr_type => try sema.zirFieldPtrType(block, inst), - .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), - .memcpy => try sema.zirMemcpy(block, inst), - .memset => try sema.zirMemset(block, inst), - .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), - .@"resume" => try sema.zirResume(block, inst), - .@"await" => try sema.zirAwait(block, inst, false), - .await_nosuspend => try sema.zirAwait(block, inst, true), - .extended => try sema.zirExtended(block, inst), - - .sqrt => try sema.zirUnaryMath(block, inst), - .sin => try sema.zirUnaryMath(block, inst), - .cos => try sema.zirUnaryMath(block, inst), - .exp => try sema.zirUnaryMath(block, inst), - .exp2 => try sema.zirUnaryMath(block, inst), - .log => try sema.zirUnaryMath(block, inst), - .log2 => try sema.zirUnaryMath(block, inst), - .log10 => try sema.zirUnaryMath(block, inst), - .fabs => try sema.zirUnaryMath(block, inst), - .floor => try sema.zirUnaryMath(block, inst), - .ceil => try sema.zirUnaryMath(block, inst), - .trunc => try sema.zirUnaryMath(block, inst), - .round => try sema.zirUnaryMath(block, inst), - - .opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), - .opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), - .opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), - .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), - .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), - .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), - - .add => try sema.zirArithmetic(block, inst), - .addwrap => try sema.zirArithmetic(block, inst), - .div => try sema.zirArithmetic(block, inst), - .mod_rem => try sema.zirArithmetic(block, inst), - .mul => try sema.zirArithmetic(block, inst), - .mulwrap => try sema.zirArithmetic(block, inst), - .sub => try sema.zirArithmetic(block, inst), - .subwrap => try sema.zirArithmetic(block, inst), - - // Instructions that we know to *always* be noreturn based solely on their tag. - // These functions match the return type of analyzeBody so that we can - // tail call them here. - .break_inline => return inst, - .condbr => return sema.zirCondbr(block, inst), - .@"break" => return sema.zirBreak(block, inst), - .compile_error => return sema.zirCompileError(block, inst), - .ret_coerce => return sema.zirRetCoerce(block, inst, true), - .ret_node => return sema.zirRetNode(block, inst), - .ret_err_value => return sema.zirRetErrValue(block, inst), - .@"unreachable" => return sema.zirUnreachable(block, inst), - .repeat => return sema.zirRepeat(block, inst), - .panic => return sema.zirPanic(block, inst), - // zig fmt: on - - // Instructions that we know can *never* be noreturn based solely on - // their tag. 
We avoid needlessly checking if they are noreturn and - // continue the loop. - // We also know that they cannot be referenced later, so we avoid - // putting them into the map. - .breakpoint => { - try sema.zirBreakpoint(block, inst); - i += 1; - continue; - }, - .fence => { - try sema.zirFence(block, inst); - i += 1; - continue; - }, - .dbg_stmt => { - try sema.zirDbgStmt(block, inst); - i += 1; - continue; - }, - .ensure_err_payload_void => { - try sema.zirEnsureErrPayloadVoid(block, inst); - i += 1; - continue; - }, - .ensure_result_non_error => { - try sema.zirEnsureResultNonError(block, inst); - i += 1; - continue; - }, - .ensure_result_used => { - try sema.zirEnsureResultUsed(block, inst); - i += 1; - continue; - }, - .set_eval_branch_quota => { - try sema.zirSetEvalBranchQuota(block, inst); - i += 1; - continue; - }, - .store => { - try sema.zirStore(block, inst); - i += 1; - continue; - }, - .store_node => { - try sema.zirStoreNode(block, inst); - i += 1; - continue; - }, - .store_to_block_ptr => { - try sema.zirStoreToBlockPtr(block, inst); - i += 1; - continue; - }, - .store_to_inferred_ptr => { - try sema.zirStoreToInferredPtr(block, inst); - i += 1; - continue; - }, - .resolve_inferred_alloc => { - try sema.zirResolveInferredAlloc(block, inst); - i += 1; - continue; - }, - .validate_struct_init_ptr => { - try sema.zirValidateStructInitPtr(block, inst); - i += 1; - continue; - }, - .validate_array_init_ptr => { - try sema.zirValidateArrayInitPtr(block, inst); - i += 1; - continue; - }, - .@"export" => { - try sema.zirExport(block, inst); - i += 1; - continue; - }, - .set_align_stack => { - try sema.zirSetAlignStack(block, inst); - i += 1; - continue; - }, - .set_cold => { - try sema.zirSetCold(block, inst); - i += 1; - continue; - }, - .set_float_mode => { - try sema.zirSetFloatMode(block, inst); - i += 1; - continue; - }, - .set_runtime_safety => { - try sema.zirSetRuntimeSafety(block, inst); - i += 1; - continue; - }, + //.alloc => try sema.zirAlloc(block, inst), + //.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), + //.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), + //.alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), + //.alloc_mut => try sema.zirAllocMut(block, inst), + //.alloc_comptime => try sema.zirAllocComptime(block, inst), + //.anyframe_type => try sema.zirAnyframeType(block, inst), + //.array_cat => try sema.zirArrayCat(block, inst), + //.array_mul => try sema.zirArrayMul(block, inst), + //.array_type => try sema.zirArrayType(block, inst), + //.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), + //.vector_type => try sema.zirVectorType(block, inst), + //.as => try sema.zirAs(block, inst), + //.as_node => try sema.zirAsNode(block, inst), + //.bit_and => try sema.zirBitwise(block, inst, .bit_and), + //.bit_not => try sema.zirBitNot(block, inst), + //.bit_or => try sema.zirBitwise(block, inst, .bit_or), + //.bitcast => try sema.zirBitcast(block, inst), + //.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), + //.block => try sema.zirBlock(block, inst), + //.suspend_block => try sema.zirSuspendBlock(block, inst), + //.bool_not => try sema.zirBoolNot(block, inst), + //.bool_and => try sema.zirBoolOp(block, inst, false), + //.bool_or => try sema.zirBoolOp(block, inst, true), + //.bool_br_and => try sema.zirBoolBr(block, inst, false), + //.bool_br_or => try sema.zirBoolBr(block, inst, true), + //.c_import => try 
sema.zirCImport(block, inst), + //.call => try sema.zirCall(block, inst, .auto, false), + //.call_chkused => try sema.zirCall(block, inst, .auto, true), + //.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), + //.call_nosuspend => try sema.zirCall(block, inst, .no_async, false), + //.call_async => try sema.zirCall(block, inst, .async_kw, false), + //.cmp_eq => try sema.zirCmp(block, inst, .eq), + //.cmp_gt => try sema.zirCmp(block, inst, .gt), + //.cmp_gte => try sema.zirCmp(block, inst, .gte), + //.cmp_lt => try sema.zirCmp(block, inst, .lt), + //.cmp_lte => try sema.zirCmp(block, inst, .lte), + //.cmp_neq => try sema.zirCmp(block, inst, .neq), + //.coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), + //.decl_ref => try sema.zirDeclRef(block, inst), + //.decl_val => try sema.zirDeclVal(block, inst), + //.load => try sema.zirLoad(block, inst), + //.elem_ptr => try sema.zirElemPtr(block, inst), + //.elem_ptr_node => try sema.zirElemPtrNode(block, inst), + //.elem_val => try sema.zirElemVal(block, inst), + //.elem_val_node => try sema.zirElemValNode(block, inst), + //.elem_type => try sema.zirElemType(block, inst), + //.enum_literal => try sema.zirEnumLiteral(block, inst), + //.enum_to_int => try sema.zirEnumToInt(block, inst), + //.int_to_enum => try sema.zirIntToEnum(block, inst), + //.err_union_code => try sema.zirErrUnionCode(block, inst), + //.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), + //.err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), + //.err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), + //.err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), + //.err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), + //.error_union_type => try sema.zirErrorUnionType(block, inst), + //.error_value => try sema.zirErrorValue(block, inst), + //.error_to_int => try sema.zirErrorToInt(block, inst), + //.int_to_error => try sema.zirIntToError(block, inst), + //.field_ptr => try sema.zirFieldPtr(block, inst), + //.field_ptr_named => try sema.zirFieldPtrNamed(block, inst), + //.field_val => try sema.zirFieldVal(block, inst), + //.field_val_named => try sema.zirFieldValNamed(block, inst), + //.func => try sema.zirFunc(block, inst, false), + //.func_inferred => try sema.zirFunc(block, inst, true), + //.import => try sema.zirImport(block, inst), + //.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), + //.int => try sema.zirInt(block, inst), + //.int_big => try sema.zirIntBig(block, inst), + //.float => try sema.zirFloat(block, inst), + //.float128 => try sema.zirFloat128(block, inst), + //.int_type => try sema.zirIntType(block, inst), + //.is_non_err => try sema.zirIsNonErr(block, inst), + //.is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), + //.is_non_null => try sema.zirIsNonNull(block, inst), + //.is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), + //.loop => try sema.zirLoop(block, inst), + //.merge_error_sets => try sema.zirMergeErrorSets(block, inst), + //.negate => try sema.zirNegate(block, inst, .sub), + //.negate_wrap => try sema.zirNegate(block, inst, .subwrap), + //.optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), + //.optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), + //.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), + //.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), + 
//.optional_type => try sema.zirOptionalType(block, inst), + //.param_type => try sema.zirParamType(block, inst), + //.ptr_type => try sema.zirPtrType(block, inst), + //.ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), + //.ref => try sema.zirRef(block, inst), + //.ret_err_value_code => try sema.zirRetErrValueCode(block, inst), + //.shl => try sema.zirShl(block, inst), + //.shr => try sema.zirShr(block, inst), + //.slice_end => try sema.zirSliceEnd(block, inst), + //.slice_sentinel => try sema.zirSliceSentinel(block, inst), + //.slice_start => try sema.zirSliceStart(block, inst), + //.str => try sema.zirStr(block, inst), + //.switch_block => try sema.zirSwitchBlock(block, inst, false, .none), + //.switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), + //.switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), + //.switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), + //.switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), + //.switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), + //.switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), + //.switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), + //.switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), + //.switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), + //.switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), + //.switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), + //.switch_capture => try sema.zirSwitchCapture(block, inst, false, false), + //.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), + //.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), + //.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), + //.switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), + //.switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), + //.type_info => try sema.zirTypeInfo(block, inst), + //.size_of => try sema.zirSizeOf(block, inst), + //.bit_size_of => try sema.zirBitSizeOf(block, inst), + //.typeof => try sema.zirTypeof(block, inst), + //.typeof_elem => try sema.zirTypeofElem(block, inst), + //.log2_int_type => try sema.zirLog2IntType(block, inst), + //.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), + //.xor => try sema.zirBitwise(block, inst, .xor), + //.struct_init_empty => try sema.zirStructInitEmpty(block, inst), + //.struct_init => try sema.zirStructInit(block, inst, false), + //.struct_init_ref => try sema.zirStructInit(block, inst, true), + //.struct_init_anon => try sema.zirStructInitAnon(block, inst, false), + //.struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), + //.array_init => try sema.zirArrayInit(block, inst, false), + //.array_init_ref => try sema.zirArrayInit(block, inst, true), + //.array_init_anon => try sema.zirArrayInitAnon(block, inst, false), + //.array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), + //.union_init_ptr => try sema.zirUnionInitPtr(block, inst), + //.field_type => try sema.zirFieldType(block, inst), + //.field_type_ref => try sema.zirFieldTypeRef(block, inst), + //.ptr_to_int => try sema.zirPtrToInt(block, inst), + //.align_of => try sema.zirAlignOf(block, inst), + //.bool_to_int => try sema.zirBoolToInt(block, inst), + 
//.embed_file => try sema.zirEmbedFile(block, inst), + //.error_name => try sema.zirErrorName(block, inst), + //.tag_name => try sema.zirTagName(block, inst), + //.reify => try sema.zirReify(block, inst), + //.type_name => try sema.zirTypeName(block, inst), + //.frame_type => try sema.zirFrameType(block, inst), + //.frame_size => try sema.zirFrameSize(block, inst), + //.float_to_int => try sema.zirFloatToInt(block, inst), + //.int_to_float => try sema.zirIntToFloat(block, inst), + //.int_to_ptr => try sema.zirIntToPtr(block, inst), + //.float_cast => try sema.zirFloatCast(block, inst), + //.int_cast => try sema.zirIntCast(block, inst), + //.err_set_cast => try sema.zirErrSetCast(block, inst), + //.ptr_cast => try sema.zirPtrCast(block, inst), + //.truncate => try sema.zirTruncate(block, inst), + //.align_cast => try sema.zirAlignCast(block, inst), + //.has_decl => try sema.zirHasDecl(block, inst), + //.has_field => try sema.zirHasField(block, inst), + //.clz => try sema.zirClz(block, inst), + //.ctz => try sema.zirCtz(block, inst), + //.pop_count => try sema.zirPopCount(block, inst), + //.byte_swap => try sema.zirByteSwap(block, inst), + //.bit_reverse => try sema.zirBitReverse(block, inst), + //.div_exact => try sema.zirDivExact(block, inst), + //.div_floor => try sema.zirDivFloor(block, inst), + //.div_trunc => try sema.zirDivTrunc(block, inst), + //.mod => try sema.zirMod(block, inst), + //.rem => try sema.zirRem(block, inst), + //.shl_exact => try sema.zirShlExact(block, inst), + //.shr_exact => try sema.zirShrExact(block, inst), + //.bit_offset_of => try sema.zirBitOffsetOf(block, inst), + //.offset_of => try sema.zirOffsetOf(block, inst), + //.cmpxchg_strong => try sema.zirCmpxchg(block, inst), + //.cmpxchg_weak => try sema.zirCmpxchg(block, inst), + //.splat => try sema.zirSplat(block, inst), + //.reduce => try sema.zirReduce(block, inst), + //.shuffle => try sema.zirShuffle(block, inst), + //.atomic_load => try sema.zirAtomicLoad(block, inst), + //.atomic_rmw => try sema.zirAtomicRmw(block, inst), + //.atomic_store => try sema.zirAtomicStore(block, inst), + //.mul_add => try sema.zirMulAdd(block, inst), + //.builtin_call => try sema.zirBuiltinCall(block, inst), + //.field_ptr_type => try sema.zirFieldPtrType(block, inst), + //.field_parent_ptr => try sema.zirFieldParentPtr(block, inst), + //.memcpy => try sema.zirMemcpy(block, inst), + //.memset => try sema.zirMemset(block, inst), + //.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), + //.@"resume" => try sema.zirResume(block, inst), + //.@"await" => try sema.zirAwait(block, inst, false), + //.await_nosuspend => try sema.zirAwait(block, inst, true), + //.extended => try sema.zirExtended(block, inst), + + //.sqrt => try sema.zirUnaryMath(block, inst), + //.sin => try sema.zirUnaryMath(block, inst), + //.cos => try sema.zirUnaryMath(block, inst), + //.exp => try sema.zirUnaryMath(block, inst), + //.exp2 => try sema.zirUnaryMath(block, inst), + //.log => try sema.zirUnaryMath(block, inst), + //.log2 => try sema.zirUnaryMath(block, inst), + //.log10 => try sema.zirUnaryMath(block, inst), + //.fabs => try sema.zirUnaryMath(block, inst), + //.floor => try sema.zirUnaryMath(block, inst), + //.ceil => try sema.zirUnaryMath(block, inst), + //.trunc => try sema.zirUnaryMath(block, inst), + //.round => try sema.zirUnaryMath(block, inst), + + //.opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), + //.opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), + //.opaque_decl_func => try sema.zirOpaqueDecl(block, 
inst, .func), + //.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), + //.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), + //.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), + + //.add => try sema.zirArithmetic(block, inst), + //.addwrap => try sema.zirArithmetic(block, inst), + //.div => try sema.zirArithmetic(block, inst), + //.mod_rem => try sema.zirArithmetic(block, inst), + //.mul => try sema.zirArithmetic(block, inst), + //.mulwrap => try sema.zirArithmetic(block, inst), + //.sub => try sema.zirArithmetic(block, inst), + //.subwrap => try sema.zirArithmetic(block, inst), + + //// Instructions that we know to *always* be noreturn based solely on their tag. + //// These functions match the return type of analyzeBody so that we can + //// tail call them here. + //.break_inline => return inst, + //.condbr => return sema.zirCondbr(block, inst), + //.@"break" => return sema.zirBreak(block, inst), + //.compile_error => return sema.zirCompileError(block, inst), + //.ret_coerce => return sema.zirRetCoerce(block, inst, true), + //.ret_node => return sema.zirRetNode(block, inst), + //.ret_err_value => return sema.zirRetErrValue(block, inst), + //.@"unreachable" => return sema.zirUnreachable(block, inst), + //.repeat => return sema.zirRepeat(block, inst), + //.panic => return sema.zirPanic(block, inst), + //// zig fmt: on + + //// Instructions that we know can *never* be noreturn based solely on + //// their tag. We avoid needlessly checking if they are noreturn and + //// continue the loop. + //// We also know that they cannot be referenced later, so we avoid + //// putting them into the map. + //.breakpoint => { + // try sema.zirBreakpoint(block, inst); + // i += 1; + // continue; + //}, + //.fence => { + // try sema.zirFence(block, inst); + // i += 1; + // continue; + //}, + //.dbg_stmt => { + // try sema.zirDbgStmt(block, inst); + // i += 1; + // continue; + //}, + //.ensure_err_payload_void => { + // try sema.zirEnsureErrPayloadVoid(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_non_error => { + // try sema.zirEnsureResultNonError(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_used => { + // try sema.zirEnsureResultUsed(block, inst); + // i += 1; + // continue; + //}, + //.set_eval_branch_quota => { + // try sema.zirSetEvalBranchQuota(block, inst); + // i += 1; + // continue; + //}, + //.store => { + // try sema.zirStore(block, inst); + // i += 1; + // continue; + //}, + //.store_node => { + // try sema.zirStoreNode(block, inst); + // i += 1; + // continue; + //}, + //.store_to_block_ptr => { + // try sema.zirStoreToBlockPtr(block, inst); + // i += 1; + // continue; + //}, + //.store_to_inferred_ptr => { + // try sema.zirStoreToInferredPtr(block, inst); + // i += 1; + // continue; + //}, + //.resolve_inferred_alloc => { + // try sema.zirResolveInferredAlloc(block, inst); + // i += 1; + // continue; + //}, + //.validate_struct_init_ptr => { + // try sema.zirValidateStructInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.validate_array_init_ptr => { + // try sema.zirValidateArrayInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.@"export" => { + // try sema.zirExport(block, inst); + // i += 1; + // continue; + //}, + //.set_align_stack => { + // try sema.zirSetAlignStack(block, inst); + // i += 1; + // continue; + //}, + //.set_cold => { + // try sema.zirSetCold(block, inst); + // i += 1; + // continue; + //}, + //.set_float_mode => { + // try sema.zirSetFloatMode(block, 
inst); + // i += 1; + // continue; + //}, + //.set_runtime_safety => { + // try sema.zirSetRuntimeSafety(block, inst); + // i += 1; + // continue; + //}, // Special case instructions to handle comptime control flow. .repeat_inline => { @@ -505,37 +505,38 @@ pub fn analyzeBody( i = 0; continue; }, - .block_inline => blk: { - // Directly analyze the block body without introducing a new block. - const inst_data = datas[inst].pl_node; - const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); - const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, - .condbr_inline => blk: { - const inst_data = datas[inst].pl_node; - const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; - const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); - const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; - const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); - const inline_body = if (cond.val.toBool()) then_body else else_body; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, + //.block_inline => blk: { + // // Directly analyze the block body without introducing a new block. + // const inst_data = datas[inst].pl_node; + // const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); + // const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + //.condbr_inline => blk: { + // const inst_data = datas[inst].pl_node; + // const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; + // const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); + // const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; + // const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + // const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); + // const inline_body = if (cond.val.toBool()) then_body else else_body; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + else => @panic("TODO remove else prong"), }; - if (air_inst.ty.isNoReturn()) + if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; try map.put(sema.gpa, inst, air_inst); i += 1; @@ -577,18 +578,13 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -/// TODO when we rework AIR memory layout, this function will no longer have a possible error. 
-pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. if (i < Zir.Inst.Ref.typed_value_map.len) { - // TODO when we rework AIR memory layout, this function can be as simple as: - // if (zir_ref < Zir.const_inst_list.len + sema.param_count) - // return zir_ref; - // Until then we allocate memory for a new, mutable `ir.Inst` to match what - // AIR expects. - return sema.mod.constInst(sema.arena, .unneeded, Zir.Inst.Ref.typed_value_map[i]); + // We intentionally map the same indexes to the same values between ZIR and AIR. + return zir_ref; } i -= Zir.Inst.Ref.typed_value_map.len; @@ -1256,7 +1252,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1271,7 +1267,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air.instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; return air_arg; } @@ -7942,6 +7938,18 @@ fn enumFieldSrcLoc( } else unreachable; } +fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = sema.air_instructions.items(.tag); + const air_datas = sema.air_instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { switch (ty.tag()) { .u8 => return .u8_type, diff --git a/src/codegen.zig b/src/codegen.zig index eaf910977e..a6c4b5ad3c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -282,7 +282,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return struct { gpa: *Allocator, - air: *const Air, + air: Air, + liveness: Liveness, bin_file: *link.File, target: *const std.Target, mod_fn: *const Module.Fn, @@ -468,8 +469,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var function = Self{ .gpa = bin_file.allocator, - .air = &air, - .liveness = &liveness, + .air = air, + .liveness = liveness, .target = &bin_file.options.target, .bin_file = bin_file, .mod_fn = module_fn, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e3f2423746..4743494f35 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6,7 +6,6 @@ const log = std.log.scoped(.c); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const Air = @import("../Air.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; const TypedValue = @import("../TypedValue.zig"); @@ -14,6 +13,8 @@ const C = link.File.C; const Decl = Module.Decl; const trace = 
@import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const Mutability = enum { Const, Mut }; @@ -37,7 +38,7 @@ const BlockData = struct { result: CValue, }; -pub const CValueMap = std.AutoHashMap(*Inst, CValue); +pub const CValueMap = std.AutoHashMap(Air.Inst.Index, CValue); pub const TypedefMap = std.ArrayHashMap( Type, struct { name: []const u8, rendered: []u8 }, @@ -93,6 +94,8 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { /// It is not available when generating .h file. pub const Object = struct { dg: DeclGen, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, @@ -102,7 +105,7 @@ pub const Object = struct { next_block_index: usize = 0, indent_writer: IndentWriter(std.ArrayList(u8).Writer), - fn resolveInst(o: *Object, inst: *Inst) !CValue { + fn resolveInst(o: *Object, inst: Air.Inst.Index) !CValue { if (inst.value()) |_| { return CValue{ .constant = inst }; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 45ee2d9bb8..ddf2883259 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -277,6 +277,9 @@ pub const Object = struct { } pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + var dg: DeclGen = .{ .object = self, .module = module, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 3d704a8dc5..4da320b087 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -159,7 +159,8 @@ pub const DeclGen = struct { /// The SPIR-V module code should be put in. spv: *SPIRVModule, - air: *const Air, + air: Air, + liveness: Liveness, /// An array of function argument result-ids. Each index corresponds with the /// function argument of the same index. 
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 45b00ddfad..912577a358 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -9,13 +9,14 @@ const wasm = std.wasm; const Module = @import("../Module.zig"); const Decl = Module.Decl; -const Air = @import("../Air.zig"); const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const Compilation = @import("../Compilation.zig"); const LazySrcLoc = Module.LazySrcLoc; const link = @import("../link.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -491,6 +492,8 @@ pub const Context = struct { /// Reference to the function declaration the code /// section belongs to decl: *Decl, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, /// Table to save `WValue`'s generated by an `Inst` values: ValueTable, @@ -710,52 +713,53 @@ pub const Context = struct { } } + pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { + try self.genFunctype(); + + // Write instructions + // TODO: check for and handle death of instructions + + // Reserve space to write the size after generating the code as well as space for locals count + try self.code.resize(10); + + try self.genBody(func.body); + + // finally, write our local types at the 'offset' position + { + leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); + + // offset into 'code' section where we will put our locals types + var local_offset: usize = 10; + + // emit the actual locals amount + for (self.locals.items) |local| { + var buf: [6]u8 = undefined; + leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); + buf[5] = local; + try self.code.insertSlice(local_offset, &buf); + local_offset += 6; + } + } + + const writer = self.code.writer(); + try writer.writeByte(wasm.opcode(.end)); + + // Fill in the size of the generated code to the reserved space at the + // beginning of the buffer. 
+ const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; + leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); + + // codegen data has been appended to `code` + return Result.appended; + } + /// Generates the wasm bytecode for the function declaration belonging to `Context` pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result { switch (typed_value.ty.zigTypeTag()) { .Fn => { try self.genFunctype(); - - // Write instructions - // TODO: check for and handle death of instructions - const mod_fn = blk: { - if (typed_value.val.castTag(.function)) |func| break :blk func.data; - if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions - unreachable; - }; - - // Reserve space to write the size after generating the code as well as space for locals count - try self.code.resize(10); - - try self.genBody(mod_fn.body); - - // finally, write our local types at the 'offset' position - { - leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); - - // offset into 'code' section where we will put our locals types - var local_offset: usize = 10; - - // emit the actual locals amount - for (self.locals.items) |local| { - var buf: [6]u8 = undefined; - leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); - buf[5] = local; - try self.code.insertSlice(local_offset, &buf); - local_offset += 6; - } - } - - const writer = self.code.writer(); - try writer.writeByte(wasm.opcode(.end)); - - // Fill in the size of the generated code to the reserved space at the - // beginning of the buffer. - const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; - leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); - - // codegen data has been appended to `code` - return Result.appended; + if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions + return self.fail("TODO implement wasm codegen for function pointers", .{}); }, .Array => { if (typed_value.val.castTag(.bytes)) |payload| { diff --git a/src/link.zig b/src/link.zig index 02d9afaf07..2403180ec8 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const fs = std.fs; @@ -14,8 +15,10 @@ const Cache = @import("Cache.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const wasi_libc = @import("wasi_libc.zig"); +const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); -pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version; +pub const producer_string = if (builtin.is_test) "zig test" else "zig " ++ build_options.version; pub const Emit = struct { /// Where the output will go. 
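
One detail of the genFunc routine above that is easy to miss: the wasm code section prefixes every function body with its byte size, but that size is only known after codegen, so the backend reserves a 5-byte slot up front and later overwrites it with leb.writeUnsignedFixed, which always emits exactly five bytes and therefore never shifts the bytes that follow. A minimal sketch of that reserve-then-backpatch flow (the body bytes here are made up):

const std = @import("std");
const leb = std.leb;

pub fn main() !void {
    var code = std.ArrayList(u8).init(std.heap.page_allocator);
    defer code.deinit();

    // Reserve space for the size field before generating anything.
    try code.resize(5);

    // Pretend these two bytes are the generated function body.
    try code.appendSlice(&[_]u8{ 0x01, 0x0b });

    // Backpatch the reserved slot with the final body length.
    const body_len = @intCast(u32, code.items.len - 5);
    leb.writeUnsignedFixed(5, code.items[0..5], body_len);

    std.debug.print("backpatched size: {d}\n", .{body_len});
}

The same fixed-width trick shows up in the Elf debug-line code in this patch, where ULEB128-fixed-4 fields are reserved so they can be rewritten in place later.
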
@@ -313,13 +316,34 @@ pub const File = struct { log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty }); assert(decl.has_tv); switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), - .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), + .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl), - .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), + .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), .spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl), .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl), + // zig fmt: on + } + } + + /// May be called before or after updateDeclExports but must be called + /// after allocateDeclIndexes for any given Decl. + pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + log.debug("updateFunc {*} ({s}), type={}", .{ + func.owner_decl, func.owner_decl.name, func.owner_decl.ty, + }); + switch (base.tag) { + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness), + .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness), + .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness), + .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness), + // zig fmt: on } } diff --git a/src/link/C.zig b/src/link/C.zig index 53561d16cd..09f789f7d1 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -2,14 +2,17 @@ const std = @import("std"); const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const fs = std.fs; + +const C = @This(); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const fs = std.fs; const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; -const C = @This(); const Type = @import("../type.zig").Type; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag: link.File.Tag = .c; pub const zig_h = @embedFile("C/zig.h"); @@ -95,10 +98,7 @@ fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void { decl.fn_link.c.typedefs.deinit(gpa); } -pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - +pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, liveness: Liveness) !void { // Keep track of all decls so we can iterate over them on flush(). 
_ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -126,6 +126,8 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { .code = code.toManaged(module.gpa), .value_map = codegen.CValueMap.init(module.gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code + .air = air, + .liveness = liveness, }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { @@ -157,6 +159,20 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { code.shrinkAndFree(module.gpa, code.items.len); } +pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + const tracy = trace(@src()); + defer tracy.end(); + + return self.finishUpdateDecl(module, func.owner_decl, air, liveness); +} + +pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + + return self.finishUpdateDecl(module, decl, undefined, undefined); +} + pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void { // The C backend does not have the ability to fix line numbers without re-generating // the entire Decl. diff --git a/src/link/Coff.zig b/src/link/Coff.zig index b466cf9136..44442b73a3 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1,6 +1,7 @@ const Coff = @This(); const std = @import("std"); +const builtin = @import("builtin"); const log = std.log.scoped(.link); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -17,6 +18,8 @@ const build_options = @import("build_options"); const Cache = @import("../Cache.zig"); const mingw = @import("../mingw.zig"); const llvm_backend = @import("../codegen/llvm.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const allocation_padding = 4 / 3; const minimum_text_block_size = 64 * allocation_padding; @@ -653,19 +656,58 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { } } -pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { - // TODO COFF/PE debug information - // TODO Implement exports +pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } const tracy = trace(@src()); defer tracy.end(); - if (build_options.have_llvm) - if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl); + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + const res = try codegen.generateFunction( + &self.base, + decl.srcLoc(), + func, + air, + liveness, + &code_buffer, + .none, + ); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + return self.finishUpdateDecl(module, func.owner_decl, code); +} + +pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + @panic("Attempted to compile for object format that was disabled by build configuration"); 
+ } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + const tracy = trace(@src()); + defer tracy.end(); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? } + // TODO COFF/PE debug information + // TODO Implement exports + var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -683,6 +725,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { }, }; + return self.finishUpdateDecl(module, func.owner_decl, code); +} + +fn finishUpdateDecl(self: *Coff, decl: *Module.Decl, code: []const u8) !void { const required_alignment = decl.ty.abiAlignment(self.base.options.target); const curr_size = decl.link.coff.size; if (curr_size != 0) { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 90224866ba..0d05b97846 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1,6 +1,7 @@ const Elf = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; @@ -10,7 +11,6 @@ const log = std.log.scoped(.link); const DW = std.dwarf; const leb128 = std.leb; -const Air = @import("../Air.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const codegen = @import("../codegen.zig"); @@ -26,6 +26,8 @@ const glibc = @import("../glibc.zig"); const musl = @import("../musl.zig"); const Cache = @import("../Cache.zig"); const llvm_backend = @import("../codegen/llvm.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const default_entry_addr = 0x8000000; @@ -2155,138 +2157,17 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void { } } -pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - - if (build_options.have_llvm) - if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl); - - if (decl.val.tag() == .extern_fn) { - return; // TODO Should we do more when front-end analyzed extern decl? - } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; - if (variable.is_extern) { - return; // TODO Should we do more when front-end analyzed extern decl? - } - } - - var code_buffer = std.ArrayList(u8).init(self.base.allocator); - defer code_buffer.deinit(); - - var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); - defer dbg_line_buffer.deinit(); - - var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); - defer dbg_info_buffer.deinit(); - - var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; - defer { - var it = dbg_info_type_relocs.valueIterator(); - while (it.next()) |value| { - value.relocs.deinit(self.base.allocator); - } - dbg_info_type_relocs.deinit(self.base.allocator); - } - - const is_fn: bool = switch (decl.ty.zigTypeTag()) { - .Fn => true, - else => false, - }; - if (is_fn) { - // For functions we need to add a prologue to the debug line program. - try dbg_line_buffer.ensureCapacity(26); - - const func = decl.val.castTag(.function).?.data; - const line_off = @intCast(u28, decl.src_line + func.lbrace_line); - - const ptr_width_bytes = self.ptrWidthBytes(); - dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ - DW.LNS_extended_op, - ptr_width_bytes + 1, - DW.LNE_set_address, - }); - // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`. 
- assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); - dbg_line_buffer.items.len += ptr_width_bytes; - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); - // This is the "relocatable" relative line offset from the previous function's end curly - // to this function's begin curly. - assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); - // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); - assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); - // Once we support more than one source file, this will have the ability to be more - // than one possible value. - const file_index = 1; - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); - - // Emit a line for the begin curly with prologue_end=false. The codegen will - // do the work of setting prologue_end=true and epilogue_begin=true. - dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); - - // .debug_info subprogram - const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; - try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); - - const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); - if (fn_ret_has_bits) { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); - } else { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); - } - // These get overwritten after generating the machine code. These values are - // "relocations" and have to be in this fixed place so that functions can be - // moved in virtual address space. - assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr - assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 - if (fn_ret_has_bits) { - const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); - if (!gop.found_existing) { - gop.value_ptr.* = .{ - .off = undefined, - .relocs = .{}, - }; - } - try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); - dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 - } - dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string - } else { - // TODO implement .debug_info for global variables +fn deinitRelocs(gpa: *Allocator, table: *File.DbgInfoTypeRelocsTable) void { + var it = table.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(gpa); } - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ - .ty = decl.ty, - .val = decl_val, - }, &code_buffer, .{ - .dwarf = .{ - .dbg_line = &dbg_line_buffer, - .dbg_info = &dbg_info_buffer, - .dbg_info_type_relocs = &dbg_info_type_relocs, - }, - }); - const code = switch (res) { - .externally_managed => |x| x, - .appended => code_buffer.items, - .fail => |em| { - decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); - return; - }, - }; + table.deinit(gpa); +} +fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym { const required_alignment = decl.ty.abiAlignment(self.base.options.target); - 
const stt_bits: u8 = if (is_fn) elf.STT_FUNC else elf.STT_OBJECT; - assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes() const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index]; if (local_sym.st_size != 0) { @@ -2338,128 +2219,16 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - const target_endian = self.base.options.target.cpu.arch.endian(); - - const text_block = &decl.link.elf; - - // If the Decl is a function, we need to update the .debug_line program. - if (is_fn) { - // Perform the relocations based on vaddr. - switch (self.ptr_width) { - .p32 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - }, - .p64 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - }, - } - { - const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); - } - - try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); - - // Now we have the full contents and may allocate a region to store it. - - // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for - // `TextBlock` and the .debug_info. If you are editing this logic, you - // probably need to edit that logic too. - - const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; - const src_fn = &decl.fn_link.elf; - src_fn.len = @intCast(u32, dbg_line_buffer.items.len); - if (self.dbg_line_fn_last) |last| not_first: { - if (src_fn.next) |next| { - // Update existing function - non-last item. - if (src_fn.off + src_fn.len + min_nop_size > next.off) { - // It grew too big, so we move it to a new location. - if (src_fn.prev) |prev| { - self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; - prev.next = src_fn.next; - } - assert(src_fn.prev != next); - next.prev = src_fn.prev; - src_fn.next = null; - // Populate where it used to be with NOPs. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); - // TODO Look at the free list before appending at the end. - src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else if (src_fn.prev == null) { - if (src_fn == last) { - // Special case: there is only 1 function and it is being updated. - // In this case there is nothing to do. The function's length has - // already been updated, and the logic below takes care of - // resizing the .debug_line section. - break :not_first; - } - // Append new function. - // TODO Look at the free list before appending at the end. 
- src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else { - // This is the first function of the Line Number Program. - self.dbg_line_fn_first = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); - } - - const last_src_fn = self.dbg_line_fn_last.?; - const needed_size = last_src_fn.off + last_src_fn.len; - if (needed_size != debug_line_sect.sh_size) { - if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { - const new_offset = self.findFreeSpace(needed_size, 1); - const existing_size = last_src_fn.off; - log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ - existing_size, - debug_line_sect.sh_offset, - new_offset, - }); - const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); - if (amt != existing_size) return error.InputOutput; - debug_line_sect.sh_offset = new_offset; - } - debug_line_sect.sh_size = needed_size; - self.shdr_table_dirty = true; // TODO look into making only the one section dirty - self.debug_line_header_dirty = true; - } - const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; - const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; - - // We only have support for one compilation unit so far, so the offsets are directly - // from the .debug_line section. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); - - // .debug_info - End the TAG_subprogram children. - try dbg_info_buffer.append(0); - } + return local_sym; +} +fn finishUpdateDecl( + self: *Elf, + module: *Module, + decl: *Module.Decl, + dbg_info_type_relocs: *File.DbgInfoTypeRelocsTable, + dbg_info_buffer: *std.ArrayList(u8), +) !void { // Now we emit the .debug_info types of the Decl. These will count towards the size of // the buffer, so we have to do it before computing the offset, and we can't perform the actual // relocations yet. @@ -2467,12 +2236,15 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var it = dbg_info_type_relocs.iterator(); while (it.next()) |entry| { entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len); - try self.addDbgInfoType(entry.key_ptr.*, &dbg_info_buffer); + try self.addDbgInfoType(entry.key_ptr.*, dbg_info_buffer); } } + const text_block = &decl.link.elf; try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len)); + const target_endian = self.base.options.target.cpu.arch.endian(); + { // Now that we have the offset assigned we can finally perform type relocations. 
var it = dbg_info_type_relocs.valueIterator(); @@ -2495,6 +2267,290 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { return self.updateDeclExports(module, decl, decl_exports); } +pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + + const tracy = trace(@src()); + defer tracy.end(); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // For functions we need to add a prologue to the debug line program. + try dbg_line_buffer.ensureCapacity(26); + + const decl = func.owner_decl; + const line_off = @intCast(u28, decl.src_line + func.lbrace_line); + + const ptr_width_bytes = self.ptrWidthBytes(); + dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ + DW.LNS_extended_op, + ptr_width_bytes + 1, + DW.LNE_set_address, + }); + // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`. + assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); + dbg_line_buffer.items.len += ptr_width_bytes; + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); + // This is the "relocatable" relative line offset from the previous function's end curly + // to this function's begin curly. + assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); + // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); + assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); + // Once we support more than one source file, this will have the ability to be more + // than one possible value. + const file_index = 1; + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); + + // Emit a line for the begin curly with prologue_end=false. The codegen will + // do the work of setting prologue_end=true and epilogue_begin=true. + dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); + + // .debug_info subprogram + const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; + try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); + + const fn_ret_type = decl.ty.fnReturnType(); + const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); + if (fn_ret_has_bits) { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); + } else { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); + } + // These get overwritten after generating the machine code. These values are + // "relocations" and have to be in this fixed place so that functions can be + // moved in virtual address space. 
+ assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr + assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 + if (fn_ret_has_bits) { + const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); + if (!gop.found_existing) { + gop.value_ptr.* = .{ + .off = undefined, + .relocs = .{}, + }; + } + try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); + dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 + } + dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string + + const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + const local_sym = try self.updateDeclCode(decl, code, elf.STT_FUNC); + + const target_endian = self.base.options.target.cpu.arch.endian(); + + // Since the Decl is a function, we need to update the .debug_line program. + // Perform the relocations based on vaddr. + switch (self.ptr_width) { + .p32 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + }, + .p64 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + }, + } + { + const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); + } + + try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); + + // Now we have the full contents and may allocate a region to store it. + + // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for + // `TextBlock` and the .debug_info. If you are editing this logic, you + // probably need to edit that logic too. + + const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; + const src_fn = &decl.fn_link.elf; + src_fn.len = @intCast(u32, dbg_line_buffer.items.len); + if (self.dbg_line_fn_last) |last| not_first: { + if (src_fn.next) |next| { + // Update existing function - non-last item. + if (src_fn.off + src_fn.len + min_nop_size > next.off) { + // It grew too big, so we move it to a new location. + if (src_fn.prev) |prev| { + self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; + prev.next = src_fn.next; + } + assert(src_fn.prev != next); + next.prev = src_fn.prev; + src_fn.next = null; + // Populate where it used to be with NOPs. 
+ const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else if (src_fn.prev == null) { + if (src_fn == last) { + // Special case: there is only 1 function and it is being updated. + // In this case there is nothing to do. The function's length has + // already been updated, and the logic below takes care of + // resizing the .debug_line section. + break :not_first; + } + // Append new function. + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else { + // This is the first function of the Line Number Program. + self.dbg_line_fn_first = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); + } + + const last_src_fn = self.dbg_line_fn_last.?; + const needed_size = last_src_fn.off + last_src_fn.len; + if (needed_size != debug_line_sect.sh_size) { + if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { + const new_offset = self.findFreeSpace(needed_size, 1); + const existing_size = last_src_fn.off; + log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ + existing_size, + debug_line_sect.sh_offset, + new_offset, + }); + const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); + if (amt != existing_size) return error.InputOutput; + debug_line_sect.sh_offset = new_offset; + } + debug_line_sect.sh_size = needed_size; + self.shdr_table_dirty = true; // TODO look into making only the one section dirty + self.debug_line_header_dirty = true; + } + const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; + const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; + + // We only have support for one compilation unit so far, so the offsets are directly + // from the .debug_line section. + const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); + + // .debug_info - End the TAG_subprogram children. + try dbg_info_buffer.append(0); + + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + +pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + + const tracy = trace(@src()); + defer tracy.end(); + + if (decl.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + if (decl.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.is_extern) { + return; // TODO Should we do more when front-end analyzed extern decl? 
+ } + } + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // TODO implement .debug_info for global variables + const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + .ty = decl.ty, + .val = decl_val, + }, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + _ = try self.updateDeclCode(decl, code, elf.STT_OBJECT); + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + /// Asserts the type has codegen bits. fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !void { switch (ty.zigTypeTag()) { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index df2e0134e4..cd020c1b27 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1,6 +1,7 @@ const MachO = @This(); const std = @import("std"); +const builtin = @import("builtin"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const fmt = std.fmt; @@ -22,6 +23,8 @@ const link = @import("../link.zig"); const File = link.File; const Cache = @import("../Cache.zig"); const target_util = @import("../target.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const DebugSymbols = @import("MachO/DebugSymbols.zig"); const Trie = @import("MachO/Trie.zig"); @@ -1132,7 +1135,55 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void { }; } +pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const tracy = trace(@src()); + defer tracy.end(); + + const decl = func.owner_decl; + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + defer { + if (debug_buffers) |*dbg| { + dbg.dbg_line_buffer.deinit(); + dbg.dbg_info_buffer.deinit(); + var it = dbg.dbg_info_type_relocs.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(self.base.allocator); + } + dbg.dbg_info_type_relocs.deinit(self.base.allocator); + } + } + + const res = if (debug_buffers) |*dbg| + try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg.dbg_line_buffer, + .dbg_info = &dbg.dbg_info_buffer, + .dbg_info_type_relocs = &dbg.dbg_info_type_relocs, + }, + }) + else + try codegen.generateSymbol(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + + return self.finishUpdateDecl(module, decl, res); +} + pub fn updateDecl(self: 
*MachO, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } const tracy = trace(@src()); defer tracy.end(); @@ -1173,6 +1224,10 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { .val = decl.val, }, &code_buffer, .none); + return self.finishUpdateDecl(module, decl, res); +} + +fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void { const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 80a92f9cdb..bc044ce414 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -2,18 +2,21 @@ //! would be to add incremental linking in a similar way as ELF does. const Plan9 = @This(); - -const std = @import("std"); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const aout = @import("Plan9/aout.zig"); const codegen = @import("../codegen.zig"); const trace = @import("../tracy.zig").trace; -const mem = std.mem; const File = link.File; -const Allocator = std.mem.Allocator; +const build_options = @import("build_options"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); +const std = @import("std"); +const builtin = @import("builtin"); +const mem = std.mem; +const Allocator = std.mem.Allocator; const log = std.log.scoped(.link); const assert = std.debug.assert; @@ -120,6 +123,19 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 { return self; } +pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). 
+ _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO Plan9 needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { _ = module; _ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -138,6 +154,9 @@ pub fn flush(self: *Plan9, comp: *Compilation) !void { } pub fn flushModule(self: *Plan9, comp: *Compilation) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } _ = comp; const tracy = trace(@src()); defer tracy.end(); @@ -199,7 +218,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { } } if (std.mem.eql(u8, exp.options.name, "_start")) { - std.debug.assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry + assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry self.entry_decl = decl; } if (exp.link.plan9) |i| { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 8a2e877d42..bc9e560582 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -36,6 +36,8 @@ const ResultId = codegen.ResultId; const trace = @import("../tracy.zig").trace; const build_options = @import("build_options"); const spec = @import("../codegen/spirv/spec.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); // TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl? pub const FnData = struct { @@ -101,7 +103,23 @@ pub fn deinit(self: *SpirV) void { self.decl_table.deinit(self.base.allocator); } +pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). + _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO SPIR-V needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } _ = module; // Keep track of all decls so we can iterate over them on flush(). 
_ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -132,13 +150,13 @@ pub fn flush(self: *SpirV, comp: *Compilation) !void { } pub fn flushModule(self: *SpirV, comp: *Compilation) !void { - const tracy = trace(@src()); - defer tracy.end(); - if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const tracy = trace(@src()); + defer tracy.end(); + const module = self.base.options.module.?; const target = comp.getTarget(); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 15a36a4bcc..be6ad78701 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1,6 +1,7 @@ const Wasm = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -18,6 +19,8 @@ const build_options = @import("build_options"); const wasi_libc = @import("../wasi_libc.zig"); const Cache = @import("../Cache.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag = link.File.Tag.wasm; @@ -186,11 +189,60 @@ pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void { } } +pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const decl = func.owner_decl; + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + + const fn_data = &decl.fn_link.wasm; + fn_data.functype.items.len = 0; + fn_data.code.items.len = 0; + fn_data.idx_refs.items.len = 0; + + var context = codegen.Context{ + .gpa = self.base.allocator, + .air = air, + .liveness = liveness, + .values = .{}, + .code = fn_data.code.toManaged(self.base.allocator), + .func_type_data = fn_data.functype.toManaged(self.base.allocator), + .decl = decl, + .err_msg = undefined, + .locals = .{}, + .target = self.base.options.target, + .global_error_set = self.base.options.module.?.global_error_set, + }; + defer context.deinit(); + + // generate the 'code' section for the function declaration + const result = context.genFunc(func) catch |err| switch (err) { + error.CodegenFail => { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, context.err_msg); + return; + }, + else => |e| return e, + }; + return self.finishUpdateDecl(decl, result); +} + // Generate code for the Decl, storing it in memory to be later written to // the file on flush(). 
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { - std.debug.assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + // TODO don't use this for non-functions const fn_data = &decl.fn_link.wasm; fn_data.functype.items.len = 0; fn_data.code.items.len = 0; @@ -218,7 +270,10 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { }, else => |e| return e, }; + return self.finishUpdateDecl(decl, result); +} +fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result) !void { const code: []const u8 = switch (result) { .appended => @as([]const u8, context.code.items), .externally_managed => |payload| payload, @@ -521,7 +576,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void { var data_offset = offset_table_size; while (cur) |cur_block| : (cur = cur_block.next) { if (cur_block.size == 0) continue; - std.debug.assert(cur_block.init); + assert(cur_block.init); const offset = (cur_block.offset_index) * ptr_width; var buf: [4]u8 = undefined; -- cgit v1.2.3 From c09b973ec25f328f5e15e9e6eed4da7f5e4634af Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jul 2021 15:45:08 -0700 Subject: stage2: compile error fixes for AIR memory layout branch Now the branch is compiling again, provided that one uses `-Dskip-non-native`, but many code paths are disabled. The code paths can now be re-enabled one at a time and updated to conform to the new AIR memory layout. --- src/Air.zig | 30 +- src/Compilation.zig | 2 +- src/Liveness.zig | 71 ++-- src/Module.zig | 34 +- src/Sema.zig | 986 +++++++++++++++++++++++++++++----------------------- src/codegen.zig | 159 +++++---- src/codegen/c.zig | 204 +++++------ src/link/Elf.zig | 3 + src/value.zig | 2 +- 9 files changed, 851 insertions(+), 640 deletions(-) (limited to 'src/Compilation.zig') diff --git a/src/Air.zig b/src/Air.zig index e85f2e5c43..1f294c43f3 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -332,12 +332,12 @@ pub const Block = struct { body_len: u32, }; -/// Trailing is a list of `Ref` for every `args_len`. +/// Trailing is a list of `Inst.Ref` for every `args_len`. pub const Call = struct { args_len: u32, }; -/// This data is stored inside extra, with two sets of trailing `Ref`: +/// This data is stored inside extra, with two sets of trailing `Inst.Ref`: /// * 0. the then body, according to `then_body_len`. /// * 1. the else body, according to `else_body_len`. pub const CondBr = struct { @@ -355,19 +355,19 @@ pub const SwitchBr = struct { /// Trailing: /// * instruction index for each `body_len`. pub const Case = struct { - item: Ref, + item: Inst.Ref, body_len: u32, }; }; pub const StructField = struct { - struct_ptr: Ref, + struct_ptr: Inst.Ref, field_index: u32, }; /// Trailing: -/// 0. `Ref` for every outputs_len -/// 1. `Ref` for every inputs_len +/// 0. `Inst.Ref` for every outputs_len +/// 1. `Inst.Ref` for every inputs_len pub const Asm = struct { /// Index to the corresponding ZIR instruction. 
/// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and @@ -381,6 +381,24 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[body_index..][0..body_len]; } +pub fn getType(air: Air, inst: Air.Inst.Index) Type { + _ = air; + _ = inst; + @panic("TODO Air getType"); +} + +pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = air.instructions.items(.tag); + const air_datas = air.instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + /// Returns the requested data, as well as the new index which is at the start of the /// trailers for the object. pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } { diff --git a/src/Compilation.zig b/src/Compilation.zig index 90224a77d1..4a442a8b67 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2023,7 +2023,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer air.deinit(gpa); log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { diff --git a/src/Liveness.zig b/src/Liveness.zig index 1402a5997b..838f19d4a1 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -7,11 +7,13 @@ //! * Switch Branches const Liveness = @This(); const std = @import("std"); -const Air = @import("Air.zig"); const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const Air = @import("Air.zig"); +const Zir = @import("Zir.zig"); +const Log2Int = std.math.Log2Int; /// This array is split into sets of 4 bits per AIR instruction. /// The MSB (0bX000) is whether the instruction is unreferenced. 
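To make the packing described by the Liveness doc comment above concrete, here is a minimal, self-contained sketch (not part of the patch) of how the 4-bits-per-instruction tomb encoding is indexed: each AIR instruction owns `bpi` consecutive bits inside an array of `usize` words, the top bit of the group marks the instruction itself as unreferenced, and the lower bits mark operand deaths. Only the shift/mask arithmetic mirrors the `isUnused`/`operandDies` accessors in this hunk; the function signatures, the driver in `main`, and the literal bit values are illustrative stand-ins.

const std = @import("std");
const assert = std.debug.assert;

const bpi = 4; // bits per instruction: 3 operand-death bits + 1 "unreferenced" bit

fn isUnused(tomb_bits: []const usize, inst: u32) bool {
    const usize_index = (inst * bpi) / @bitSizeOf(usize);
    const mask = @as(usize, 1) <<
        @intCast(std.math.Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1));
    return (tomb_bits[usize_index] & mask) != 0;
}

fn operandDies(tomb_bits: []const usize, inst: u32, operand: u32) bool {
    assert(operand < bpi - 1);
    const usize_index = (inst * bpi) / @bitSizeOf(usize);
    const mask = @as(usize, 1) <<
        @intCast(std.math.Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand);
    return (tomb_bits[usize_index] & mask) != 0;
}

pub fn main() void {
    // With a 64-bit usize, 16 instructions share one word; instruction 3 owns bits 12..15.
    // Set "operand 0 dies" (bit 12) and "instruction unreferenced" (bit 15) for instruction 3.
    var tomb_bits = [_]usize{(1 << 12) | (1 << 15)};
    assert(operandDies(&tomb_bits, 3, 0));
    assert(!operandDies(&tomb_bits, 3, 1));
    assert(isUnused(&tomb_bits, 3));
}
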
@@ -44,7 +46,7 @@ pub const SwitchBr = struct { else_death_count: u32, }; -pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -58,6 +60,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .zir = &zir, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -74,23 +77,32 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { }; } +pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi { + const usize_index = (inst * bpi) / @bitSizeOf(usize); + return @truncate(Bpi, l.tomb_bits[usize_index] >> + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi)); +} + pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); l.tomb_bits[usize_index] |= mask; } @@ -113,10 +125,12 @@ const Analysis = struct { tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), + zir: *const Zir, fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { const usize_index = (inst * bpi) / @bitSizeOf(usize); - a.tomb_bits[usize_index] |= tomb_bits << (inst % (@bitSizeOf(usize) / bpi)) * bpi; + a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); } fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { @@ -203,9 +217,11 @@ fn analyzeInst( return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none }); }, + .arg, .alloc, .br, .constant, + .const_ty, .breakpoint, .dbg_stmt, .varptr, @@ -255,15 +271,30 @@ fn analyzeInst( if (args.len <= bpi - 2) { var buf: [bpi - 1]Air.Inst.Ref = undefined; buf[0] = callee; - std.mem.copy(&buf, buf[1..], args); + std.mem.copy(Air.Inst.Ref, buf[1..], @bitCast([]const Air.Inst.Ref, args)); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function with many args"); + @panic("TODO: liveness analysis for function with greater than 2 args"); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none }); }, + .assembly => { + const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); + const extended = 
a.zir.instructions.items(.data)[extra.data.zir_index].extended; + const outputs_len = @truncate(u5, extended.small); + const inputs_len = @truncate(u5, extended.small >> 5); + const outputs = a.air.extra[extra.end..][0..outputs_len]; + const inputs = a.air.extra[extra.end + outputs.len ..][0..inputs_len]; + if (outputs.len + inputs.len <= bpi - 1) { + var buf: [bpi - 1]Air.Inst.Ref = undefined; + std.mem.copy(Air.Inst.Ref, &buf, @bitCast([]const Air.Inst.Ref, outputs)); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], @bitCast([]const Air.Inst.Ref, inputs)); + return trackOperands(a, new_set, inst, main_tomb, buf); + } + @panic("TODO: liveness analysis for asm with greater than 3 args"); + }, .block => { const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; @@ -287,8 +318,8 @@ fn analyzeInst( const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - var then_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer then_table.deinit(); + var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer then_table.deinit(gpa); try analyzeWithContext(a, &then_table, then_body); // Reset the table back to its state from before the branch. @@ -299,8 +330,8 @@ fn analyzeInst( } } - var else_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer else_table.deinit(); + var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer else_table.deinit(gpa); try analyzeWithContext(a, &else_table, else_body); var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa); @@ -331,7 +362,7 @@ fn analyzeInst( } // Now we have to correctly populate new_set. if (new_set) |ns| { - try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count())); + try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count())); var it = then_table.keyIterator(); while (it.next()) |key| { _ = ns.putAssumeCapacity(key.*, {}); @@ -344,7 +375,7 @@ fn analyzeInst( const then_death_count = @intCast(u32, then_entry_deaths.items.len); const else_death_count = @intCast(u32, else_entry_deaths.items.len); - try a.extra.ensureUnusedCapacity(std.meta.fields(@TypeOf(CondBr)).len + + try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(Air.CondBr).len + then_death_count + else_death_count); const extra_index = a.addExtraAssumeCapacity(CondBr{ .then_death_count = then_death_count, @@ -352,7 +383,7 @@ fn analyzeInst( }); a.extra.appendSliceAssumeCapacity(then_entry_deaths.items); a.extra.appendSliceAssumeCapacity(else_entry_deaths.items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); // Continue on with the instruction analysis. The following code will find the condition // instruction, and the deaths flag for the CondBr instruction will indicate whether the @@ -438,12 +469,12 @@ fn analyzeInst( }); for (case_deaths[0 .. 
case_deaths.len - 1]) |*cd| { const case_death_count = @intCast(u32, cd.items.len); - try a.extra.ensureUnusedCapacity(1 + case_death_count + else_death_count); + try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count); a.extra.appendAssumeCapacity(case_death_count); a.extra.appendSliceAssumeCapacity(cd.items); } a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); }, @@ -452,7 +483,7 @@ fn analyzeInst( fn trackOperands( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, main_tomb: bool, operands: [bpi - 1]Air.Inst.Ref, @@ -468,12 +499,12 @@ fn trackOperands( tomb_bits <<= 1; const op_int = @enumToInt(operands[i]); if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const operand: Air.Inst.Index = op_int - Air.Inst.Ref.typed_value_map.len; + const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); const prev = try table.fetchPut(gpa, operand, {}); if (prev == null) { // Death. tomb_bits |= 1; - if (new_set) |ns| try ns.putNoClobber(operand, {}); + if (new_set) |ns| try ns.putNoClobber(gpa, operand, {}); } } a.storeTombBits(inst, tomb_bits); diff --git a/src/Module.zig b/src/Module.zig index 5972c2bdcf..7ec9c7e93d 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1225,6 +1225,30 @@ pub const Scope = struct { pub fn getFileScope(block: *Block) *Scope.File { return block.src_decl.namespace.file_scope; } + + pub fn addTyOp( + block: *Block, + tag: Air.Inst.Tag, + ty: Type, + operand: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + const sema = block.sema; + const gpa = sema.gpa; + + try sema.air_instructions.ensureUnusedCapacity(gpa, 1); + try block.instructions.ensureUnusedCapacity(gpa, 1); + + const inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .ty_op = .{ + .ty = try sema.addType(ty), + .operand = operand, + } }, + }); + block.instructions.appendAssumeCapacity(inst); + return Sema.indexToRef(inst); + } }; }; @@ -3408,7 +3432,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen()); + const param_inst_list = try gpa.alloc(Air.Inst.Ref, fn_ty.fnParamLen()); defer gpa.free(param_inst_list); var sema: Sema = .{ @@ -3440,10 +3464,13 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer inner_block.instructions.deinit(gpa); // AIR requires the arg parameters to be the first N instructions. 
+ try inner_block.instructions.ensureTotalCapacity(gpa, param_inst_list.len); for (param_inst_list) |*param_inst, param_index| { const param_type = fn_ty.fnParamType(param_index); const ty_ref = try sema.addType(param_type); - param_inst.* = @intCast(u32, sema.air_instructions.len); + const arg_index = @intCast(u32, sema.air_instructions.len); + inner_block.instructions.appendAssumeCapacity(arg_index); + param_inst.* = Sema.indexToRef(arg_index); try sema.air_instructions.append(gpa, .{ .tag = .arg, .data = .{ @@ -3454,7 +3481,6 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { }, }); } - try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); @@ -4043,13 +4069,11 @@ pub fn floatMul( } pub fn simplePtrType( - mod: *Module, arena: *Allocator, elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size, ) Allocator.Error!Type { - _ = mod; if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) { return Type.initTag(.const_slice_u8); } diff --git a/src/Sema.zig b/src/Sema.zig index 54c42a482d..fc130cd4a4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36,7 +36,7 @@ func: ?*Module.Fn, /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function, /// > otherwise it is the number of parameters of the function. /// > param_count: u32 -param_inst_list: []const Air.Inst.Index, +param_inst_list: []const Air.Inst.Ref, branch_quota: u32 = 1000, branch_count: u32 = 0, /// This field is updated when a new source location becomes active, so that @@ -59,8 +59,6 @@ const TypedValue = @import("TypedValue.zig"); const Air = @import("Air.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); -const Inst = ir.Inst; -const Body = ir.Body; const trace = @import("tracy.zig").trace; const Scope = Module.Scope; const InnerError = Module.InnerError; @@ -117,7 +115,7 @@ pub fn analyzeFnBody( /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. 
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -513,7 +511,7 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } @@ -529,12 +527,12 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } //}, - else => @panic("TODO remove else prong"), + else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), }; if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; @@ -543,7 +541,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -598,7 +596,7 @@ fn resolveConstBool( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) !bool { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.bool); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -611,7 +609,7 @@ fn resolveConstString( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) ![]u8 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.const_slice_u8); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -619,24 +617,39 @@ fn resolveConstString( } pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); return sema.resolveAirAsType(block, src, air_inst); } -fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type { +fn resolveAirAsType( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_inst: Air.Inst.Ref, +) !Type { const wanted_type = Type.initTag(.@"type"); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toType(sema.arena); } -fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !Value { - return (try sema.resolveDefinedValue(block, src, base)) orelse +fn resolveConstValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !Value { + return (try sema.resolveDefinedValue(block, src, air_ref)) orelse return sema.failWithNeededComptime(block, src); } 
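The signature changes in this commit replace `Air.Inst.Index` with `Air.Inst.Ref` throughout Sema. Based on the arithmetic visible in `getRefType`, `trackOperands`, `resolvePossiblyUndefinedValue`, and the `Sema.indexToRef`/`refToIndex` calls, a `Ref` whose integer value is below `Air.Inst.Ref.typed_value_map.len` names a well-known interned constant, and anything at or above that threshold encodes `typed_value_map.len + instruction_index`. The following self-contained sketch (not part of the patch; the enum tags and the map length are hypothetical stand-ins) illustrates that convention.

const std = @import("std");
const assert = std.debug.assert;

const typed_value_map_len: u32 = 4; // stand-in for Air.Inst.Ref.typed_value_map.len
const Ref = enum(u32) { none, u8_type, bool_true, bool_false, _ }; // stand-in tags
const Index = u32;

// Map an instruction index into the Ref space by offsetting past the interned constants.
fn indexToRef(inst: Index) Ref {
    return @intToEnum(Ref, typed_value_map_len + inst);
}

// Map back; returns null when the Ref names an interned constant rather than an instruction.
fn refToIndex(ref: Ref) ?Index {
    const ref_int = @enumToInt(ref);
    if (ref_int >= typed_value_map_len) return ref_int - typed_value_map_len;
    return null;
}

pub fn main() void {
    assert(refToIndex(indexToRef(7)).? == 7);
    assert(refToIndex(.bool_true) == null);
}

Keeping the interned constants in the low range presumably lets common values such as `bool_true` be referenced without allocating an AIR instruction, at the cost of one add or subtract when converting between a `Ref` and an instruction index.
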
-fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value { - if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| { +fn resolveDefinedValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !?Value { + if (try sema.resolvePossiblyUndefinedValue(block, src, air_ref)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); } @@ -649,13 +662,29 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - base: Air.Inst.Index, + air_ref: Air.Inst.Ref, ) !?Value { - if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| { + const ty = sema.getTypeOfAirRef(air_ref); + if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; } - const inst = base.castTag(.constant) orelse return null; - return inst.val; + // First section of indexes correspond to a set number of constant values. + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val; + } + i -= Air.Inst.Ref.typed_value_map.len; + + switch (sema.air_instructions.items(.tag)[i]) { + .constant => { + const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; + return sema.air_values.items[ty_pl.payload]; + }, + .const_ty => { + return sema.air_instructions.items(.data)[i].ty.toValue(undefined) catch unreachable; + }, + else => return null, + } } fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { @@ -677,7 +706,7 @@ fn resolveAlreadyCoercedInt( comptime Int: type, ) !Int { comptime assert(@typeInfo(Int).Int.bits <= 64); - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_inst); switch (@typeInfo(Int).Int.signedness) { .signed => return @intCast(Int, val.toSignedInt()), @@ -692,7 +721,7 @@ fn resolveInt( zir_ref: Zir.Inst.Ref, dest_type: Type, ) !u64 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, dest_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced); @@ -705,21 +734,21 @@ pub fn resolveInstConst( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) InnerError!TypedValue { - const air_inst = try sema.resolveInst(zir_ref); - const val = try sema.resolveConstValue(block, src, air_inst); + const air_ref = sema.resolveInst(zir_ref); + const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ - .ty = air_inst.ty, + .ty = sema.getTypeOfAirRef(air_ref), .val = val, }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -754,7 +783,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const small = 
@bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -825,7 +854,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1022,7 +1051,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1086,7 +1115,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1106,7 +1135,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1146,7 +1175,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1154,16 +1183,16 @@ fn zirRetPtr( try sema.requireFunctionBlock(block, src); const fn_ty = sema.func.?.owner_decl.ty; const ret_type = fn_ty.fnReturnType(); - const ptr_type = try sema.mod.simplePtrType(sema.arena, ret_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, ret_type, true, .One); return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeRef(block, inst_data.src(), operand); } @@ -1171,7 +1200,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1187,7 +1216,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.ensureResultUsed(block, operand, src); @@ -1196,7 +1225,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I fn ensureResultUsed( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, src: LazySrcLoc, ) InnerError!void { switch (operand.ty.zigTypeTag()) { @@ -1210,7 +1239,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); switch (operand.ty.zigTypeTag()) { .ErrorSet, .ErrorUnion => return sema.mod.fail(&block.base, src, "error is discarded", .{}), @@ -1218,13 +1247,13 @@ fn zirEnsureResultNonError(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const array_ptr = try sema.resolveInst(inst_data.operand); + const array_ptr = sema.resolveInst(inst_data.operand); const elem_ty = array_ptr.ty.elemType(); if (!elem_ty.isIndexable()) { @@ -1267,7 +1296,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[refToIndex(air_arg).?].ty_str.str = inst_data.start; return air_arg; } @@ -1275,13 +1304,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1289,7 +1318,7 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); const val_payload = try sema.arena.create(Value.Payload.ComptimeAlloc); val_payload.* = .{ @@ -1304,13 +1333,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1318,12 +1347,12 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_decl_src = inst_data.src(); const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn 
zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1332,7 +1361,7 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); try sema.validateVarType(block, ty_src, var_type); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } @@ -1342,7 +1371,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1372,7 +1401,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const ptr_val = ptr.castTag(.constant).?.val; const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; const peer_inst_list = inferred_alloc.data.stored_inst_list.items; @@ -1385,7 +1414,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde if (var_is_mut) { try sema.validateVarType(block, ty_src, final_elem_ty); } - const final_ptr_ty = try sema.mod.simplePtrType(sema.arena, final_elem_ty, true, .One); + const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One); // Change it to a normal alloc. ptr.ty = final_ptr_ty; @@ -1406,7 +1435,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind const struct_obj: *Module.Struct = s: { const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); + const object_ptr = sema.resolveInst(field_ptr_extra.lhs); break :s object_ptr.ty.elemType().castTag(.@"struct").?.data; }; @@ -1535,9 +1564,9 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In // to omit it. return; } - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); - const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); // TODO detect when this store should be done at compile-time. For example, // if expressions should force it when the condition is compile-time known. 
const src: LazySrcLoc = .unneeded; @@ -1552,14 +1581,14 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const src: LazySrcLoc = .unneeded; const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); const inferred_alloc = ptr.castTag(.constant).?.val.castTag(.inferred_alloc).?; // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, value); // Create a runtime bitcast instruction with exactly the type the pointer wants. - const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); try sema.requireRuntimeBlock(block, src); const bitcasted_ptr = try block.addUnOp(src, ptr_ty, .bitcast, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); @@ -1578,8 +1607,8 @@ fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!v defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); return sema.storePtr(block, sema.src, ptr, value); } @@ -1590,18 +1619,18 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const ptr = try sema.resolveInst(extra.lhs); - const value = try sema.resolveInst(extra.rhs); + const ptr = sema.resolveInst(extra.lhs); + const value = sema.resolveInst(extra.rhs); return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const inst_data = sema.code.instructions.items(.data)[inst].param_type; - const fn_inst = try sema.resolveInst(inst_data.callee); + const fn_inst = sema.resolveInst(inst_data.callee); const param_index = inst_data.param_index; const fn_ty: Type = switch (fn_inst.ty.zigTypeTag()) { @@ -1631,7 +1660,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, src, param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1659,7 +1688,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1668,7 +1697,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return 
sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1686,7 +1715,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! }); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1699,7 +1728,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1728,7 +1757,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1741,7 +1770,7 @@ fn zirCompileLog( for (args) |arg_ref, i| { if (i != 0) try writer.print(", ", .{}); - const arg = try sema.resolveInst(arg_ref); + const arg = sema.resolveInst(arg_ref); if (try sema.resolvePossiblyUndefinedValue(block, src, arg)) |val| { try writer.print("@as({}, {})", .{ arg.ty, val }); } else { @@ -1773,12 +1802,12 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); - const msg_inst = try sema.resolveInst(inst_data.operand); + const msg_inst = sema.resolveInst(inst_data.operand); return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1843,7 +1872,7 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1853,13 +1882,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1917,7 +1946,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1928,7 +1957,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2088,7 +2117,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerE const inst_data = sema.code.instructions.items(.data)[inst].@"break"; const src = sema.src; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const zir_block = inst_data.block_inst; var block = start_block; @@ -2136,7 +2165,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2144,7 +2173,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2198,7 +2227,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2208,12 +2237,12 @@ fn zirCall( const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.args_len); - const func = try sema.resolveInst(extra.data.callee); + const func = sema.resolveInst(extra.data.callee); // TODO handle function calls of generic functions - const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len); + const resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len); for (args) |zir_arg, i| { // the args are already casted to the result of a param type instruction. - resolved_args[i] = try sema.resolveInst(zir_arg); + resolved_args[i] = sema.resolveInst(zir_arg); } return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args); @@ -2222,13 +2251,13 @@ fn zirCall( fn analyzeCall( sema: *Sema, block: *Scope.Block, - func: Air.Inst.Index, + func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - args: []const Air.Inst.Index, -) InnerError!Air.Inst.Index { + args: []const Air.Inst.Ref, +) InnerError!Air.Inst.Ref { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2285,7 +2314,7 @@ fn analyzeCall( const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or func.ty.fnCallingConvention() == .Inline; - const result: Air.Inst.Index = if (is_inline_call) res: { + const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, @@ -2383,7 +2412,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2395,7 +2424,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2407,7 +2436,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2415,7 +2444,7 
@@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.constType(sema.arena, src, elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -2430,7 +2459,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2443,7 +2472,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2458,7 +2487,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2471,7 +2500,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2492,7 +2521,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.constType(sema.arena, src, err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2511,14 +2540,14 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); const op_coerced = try sema.coerce(block, Type.initTag(.anyerror), op, operand_src); const result_ty = Type.initTag(.u16); @@ -2541,7 +2570,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, result_ty, 
.bitcast, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2549,7 +2578,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); if (try sema.resolveDefinedValue(block, operand_src, op)) |value| { const int = value.toUnsignedInt(); @@ -2574,7 +2603,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2583,8 +2612,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); if (rhs.ty.zigTypeTag() == .Bool and lhs.ty.zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.mod.errMsg(&block.base, lhs_src, "expected error set type, found 'bool'", .{}); @@ -2664,7 +2693,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2678,15 +2707,15 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); - const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand.ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand.ty.unionHasTag()) { @@ -2760,7 +2789,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2770,7 +2799,7 @@ fn zirIntToEnum(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag() != .Enum) { return mod.fail(&block.base, dest_ty_src, "expected enum, found {}", .{dest_ty}); @@ -2821,12 +2850,12 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const optional_ptr = try sema.resolveInst(inst_data.operand); + const optional_ptr = sema.resolveInst(inst_data.operand); assert(optional_ptr.ty.zigTypeTag() == .Pointer); const src = inst_data.src(); @@ -2836,7 +2865,7 @@ fn zirOptionalPayloadPtr( } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try sema.mod.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); + const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); if (optional_ptr.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2864,13 +2893,13 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const opt_type = operand.ty; if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); @@ -2902,13 +2931,13 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); @@ -2936,19 +2965,19 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); - const operand_pointer_ty = try sema.mod.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); + const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, 
!operand.ty.isConstPtr(), .One); if (operand.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2975,13 +3004,13 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); @@ -3001,13 +3030,13 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) @@ -3035,7 +3064,7 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); if (operand.ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { @@ -3048,7 +3077,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3099,7 +3128,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3240,7 +3269,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3248,7 +3277,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air. 
return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3264,18 +3293,18 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const dest_type = try sema.resolveType(block, src, zir_dest_type); - const operand = try sema.resolveInst(zir_operand); + const operand = sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); if (ptr.ty.zigTypeTag() != .Pointer) { const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}); @@ -3287,7 +3316,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, ty, .ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3296,7 +3325,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const object_ptr = if (object.ty.zigTypeTag() == .Pointer) object else @@ -3305,7 +3334,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3314,11 +3343,11 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object_ptr = try sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3326,14 +3355,14 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); const object_ptr = try sema.analyzeRef(block, src, object); const result_ptr = try sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3341,12 +3370,12 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object_ptr = try sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3357,7 +3386,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_int = switch (dest_type.zigTypeTag()) { .ComptimeInt => true, @@ -3389,20 +3418,21 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); - return sema.bitcast(block, dest_type, operand); + const operand = sema.resolveInst(extra.rhs); + return sema.bitcast(block, dest_type, operand, operand_src); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3413,7 +3443,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const extra = 
sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_float = switch (dest_type.zigTypeTag()) { .ComptimeFloat => true, @@ -3445,22 +3475,22 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array = try sema.resolveInst(bin_inst.lhs); + const array = sema.resolveInst(bin_inst.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, sema.src, array); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const elem_index = sema.resolveInst(bin_inst.rhs); const result_ptr = try sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3468,27 +3498,27 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array = try sema.resolveInst(extra.lhs); + const array = sema.resolveInst(extra.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, src, array); - const elem_index = try sema.resolveInst(extra.rhs); + const elem_index = sema.resolveInst(extra.rhs); const result_ptr = try sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array_ptr = try sema.resolveInst(bin_inst.lhs); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const array_ptr = sema.resolveInst(bin_inst.lhs); + const elem_index = sema.resolveInst(bin_inst.rhs); return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3496,39 +3526,39 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const elem_index = try 
sema.resolveInst(extra.rhs); + const array_ptr = sema.resolveInst(extra.lhs); + const elem_index = sema.resolveInst(extra.rhs); return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3536,10 +3566,10 @@ fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); - const sentinel = try sema.resolveInst(extra.sentinel); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); + const sentinel = sema.resolveInst(extra.sentinel); return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src); } @@ -3550,7 +3580,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3569,7 +3599,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3588,7 +3618,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3597,7 +3627,7 @@ fn zirSwitchBlock( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = 
sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); - const operand_ptr = try sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3621,7 +3651,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3630,7 +3660,7 @@ fn zirSwitchBlockMulti( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index); - const operand_ptr = try sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3651,14 +3681,14 @@ fn zirSwitchBlockMulti( fn analyzeSwitch( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, extra_end: usize, special_prong: Zir.SpecialProng, scalar_cases_len: usize, multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const gpa = sema.gpa; const mod = sema.mod; @@ -4217,7 +4247,7 @@ fn analyzeSwitch( const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); _ = try sema.resolveConstValue(&child_block, item.src, item); const cmp_ok = try case_block.addBinOp(item.src, bool_ty, .cmp_eq, operand, item); @@ -4235,8 +4265,8 @@ fn analyzeSwitch( const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; - const item_first = try sema.resolveInst(first_ref); - const item_last = try sema.resolveInst(last_ref); + const item_first = sema.resolveInst(first_ref); + const item_last = sema.resolveInst(last_ref); _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); @@ -4334,7 +4364,7 @@ fn resolveSwitchItemVal( switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, ) InnerError!TypedValue { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); // We have to avoid the other helper functions here because we cannot construct a LazySrcLoc // because we only have the switch AST node. Only if we know for sure we need to report // a compile error do we resolve the full source locations. 
@@ -4513,7 +4543,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4522,7 +4552,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -4547,7 +4577,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return mod.constBool(arena, src, false); } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4572,13 +4602,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return mod.constType(sema.arena, src, file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4587,7 +4617,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4599,8 +4629,8 @@ fn zirBitwise( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, - ir_tag: ir.Inst.Tag, -) InnerError!Air.Inst.Index { + air_tag: Air.Inst.Tag, +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4609,8 +4639,8 @@ fn zirBitwise( const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); @@ -4655,10 +4685,10 @@ fn zirBitwise( } try sema.requireRuntimeBlock(block, src); - return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); + return 
block.addBinOp(src, scalar_type, air_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4666,7 +4696,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4674,7 +4704,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4687,7 +4717,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4695,13 +4725,13 @@ fn zirNegate( const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(.zero); - const rhs = try sema.resolveInst(inst_data.operand); + const lhs = sema.resolveInst(.zero); + const rhs = sema.resolveInst(inst_data.operand); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4711,8 +4741,8 @@ fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, sema.src, lhs_src, rhs_src); } @@ -4721,7 +4751,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4735,12 +4765,12 @@ fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, zir_tag: Zir.Inst.Tag, - lhs: Air.Inst.Index, - rhs: Air.Inst.Index, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -4850,14 +4880,14 @@ fn 
analyzeArithmetic( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr_src: LazySrcLoc = .{ .node_offset_deref_ptr = inst_data.src_node }; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); return sema.analyzeLoad(block, src, ptr, ptr_src); } @@ -4865,7 +4895,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4915,7 +4945,7 @@ fn zirAsm( const name = sema.code.nullTerminatedString(input.data.name); _ = name; // TODO: use the name - arg.* = try sema.resolveInst(input.data.operand); + arg.* = sema.resolveInst(input.data.operand); inputs[arg_i] = sema.code.nullTerminatedString(input.data.constraint); } @@ -4949,7 +4979,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4960,8 +4990,8 @@ fn zirCmp( const src: LazySrcLoc = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); const is_equality_cmp = switch (op) { .eq, .neq => true, @@ -5047,7 +5077,7 @@ fn zirCmp( return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5057,7 +5087,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5071,7 +5101,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5080,7 +5110,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{}); } @@ -5089,12 +5119,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5137,31 +5167,31 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.mod.constType(sema.arena, src, operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand_ptr = try sema.resolveInst(inst_data.operand); + const operand_ptr = sema.resolveInst(inst_data.operand); const elem_ty = operand_ptr.ty.elemType(); return sema.mod.constType(sema.arena, src, elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5171,7 +5201,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5183,20 +5213,20 @@ fn zirTypeofPeer( defer sema.gpa.free(inst_list); for (args) |arg_ref, i| { - inst_list[i] = try sema.resolveInst(arg_ref); + inst_list[i] = sema.resolveInst(arg_ref); } const result_type = try sema.resolvePeerTypes(block, src, inst_list); return sema.mod.constType(sema.arena, src, result_type); } -fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const uncasted_operand = try sema.resolveInst(inst_data.operand); + const uncasted_operand = sema.resolveInst(inst_data.operand); const bool_type = Type.initTag(.bool); const operand = try sema.coerce(block, bool_type, uncasted_operand, uncasted_operand.src); @@ -5212,16 +5242,16 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, comptime is_bool_or: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const bool_type = Type.initTag(.bool); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const uncasted_lhs = try sema.resolveInst(bin_inst.lhs); + const uncasted_lhs = sema.resolveInst(bin_inst.lhs); const lhs = try sema.coerce(block, bool_type, uncasted_lhs, uncasted_lhs.src); - const uncasted_rhs = try sema.resolveInst(bin_inst.rhs); + const uncasted_rhs = sema.resolveInst(bin_inst.rhs); const rhs = try sema.coerce(block, bool_type, uncasted_rhs, uncasted_rhs.src); if (lhs.value()) |lhs_val| { @@ -5234,7 +5264,7 @@ fn zirBoolOp( } } try sema.requireRuntimeBlock(block, src); - const tag: ir.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; + const tag: Air.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; return block.addBinOp(src, bool_type, tag, lhs, rhs); } @@ -5243,14 +5273,14 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const datas = sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; const src: LazySrcLoc = .unneeded; - const lhs = try sema.resolveInst(inst_data.lhs); + const lhs = sema.resolveInst(inst_data.lhs); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; @@ -5313,13 +5343,13 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNull(block, src, operand, true); } @@ -5327,33 +5357,33 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) 
InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } @@ -5374,7 +5404,7 @@ fn zirCondbr( const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const uncasted_cond = try sema.resolveInst(extra.data.condition); + const uncasted_cond = sema.resolveInst(extra.data.condition); const cond = try sema.coerce(parent_block, Type.initTag(.bool), uncasted_cond, cond_src); if (try sema.resolveDefinedValue(parent_block, src, cond)) |cond_val| { @@ -5456,7 +5486,7 @@ fn zirRetCoerce( defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src, need_coercion); @@ -5467,7 +5497,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src, false); @@ -5476,7 +5506,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError fn analyzeRet( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, src: LazySrcLoc, need_coercion: bool, ) InnerError!Zir.Inst.Index { @@ -5511,7 +5541,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5532,7 +5562,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.mod.constType(sema.arena, .unneeded, ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { 
+fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5586,7 +5616,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5600,13 +5630,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5657,7 +5687,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.failWithOwnedErrorMsg(&block.base, msg); } found_fields[field_index] = item.data.field_type; - field_inits[field_index] = try sema.resolveInst(item.data.init); + field_inits[field_index] = sema.resolveInst(item.data.init); } var root_msg: ?*Module.ErrorMsg = null; @@ -5719,7 +5749,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5727,7 +5757,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5735,7 +5765,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5743,13 +5773,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, 
inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5771,7 +5801,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5780,7 +5810,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5789,91 +5819,91 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn 
zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const operand_res = try sema.resolveInst(extra.rhs); + const operand_res = sema.resolveInst(extra.rhs); const operand_coerced = try sema.coerce(block, Type.initTag(.usize), operand_res, operand_src); const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5929,199 +5959,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, type_res, .bitcast, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = 
inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); 
} -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6132,7 +6162,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6144,7 +6174,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6210,7 +6240,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -6277,7 +6307,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6287,7 +6317,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6297,7 +6327,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6307,7 +6337,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: 
Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{}); @@ -6317,7 +6347,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6327,7 +6357,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6361,7 +6391,7 @@ pub const PanicId = enum { invalid_error_code, }; -fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void { +fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Ref, panic_id: PanicId) !void { const block_inst = try sema.arena.create(Inst.Block); block_inst.* = .{ .base = .{ @@ -6423,7 +6453,7 @@ fn panicWithMsg( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - msg_inst: Air.Inst.Index, + msg_inst: Air.Inst.Ref, ) !Zir.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6439,7 +6469,7 @@ fn panicWithMsg( const panic_fn = try sema.getBuiltin(block, src, "panic"); const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try mod.simplePtrType(arena, stack_trace_ty, true, .One); + const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One); const null_stack_trace = try mod.constInst(arena, src, .{ .ty = try mod.optionalType(arena, ptr_stack_trace_ty), .val = Value.initTag(.null_value), @@ -6500,10 +6530,10 @@ fn namedFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - object_ptr: Air.Inst.Index, + object_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; @@ -6579,7 +6609,7 @@ fn namedFieldPtr( } else (try mod.getErrorValue(field_name)).key; return mod.constInst(arena, src, .{ - .ty = try mod.simplePtrType(arena, child_type, false, .One), + .ty = try Module.simplePtrType(arena, child_type, false, .One), .val = try Value.Tag.ref_val.create( arena, try Value.Tag.@"error".create(arena, .{ @@ -6633,7 +6663,7 @@ fn namedFieldPtr( const field_index_u32 = @intCast(u32, field_index); const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32); return mod.constInst(arena, src, .{ - .ty = try mod.simplePtrType(arena, child_type, false, .One), + .ty = try Module.simplePtrType(arena, child_type, false, .One), .val = try Value.Tag.ref_val.create(arena, enum_val), }); }, @@ -6653,7 +6683,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, decl_name: []const u8, -) InnerError!?Air.Inst.Index { +) InnerError!?Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; if (try 
sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6677,11 +6707,11 @@ fn analyzeStructFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - struct_ptr: Air.Inst.Index, + struct_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6692,7 +6722,7 @@ fn analyzeStructFieldPtr( const field_index = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name); const field = struct_obj.fields.values()[field_index]; - const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One); + const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { return mod.constInst(arena, src, .{ @@ -6712,11 +6742,11 @@ fn analyzeUnionFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - union_ptr: Air.Inst.Index, + union_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6728,7 +6758,7 @@ fn analyzeUnionFieldPtr( return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One); + const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| { // TODO detect inactive union field and emit compile error @@ -6749,10 +6779,10 @@ fn elemPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - elem_index: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6776,10 +6806,10 @@ fn elemPtrArray( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - elem_index: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { if (array_ptr.value()) |array_ptr_val| { if (elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. @@ -6804,35 +6834,41 @@ fn coerce( sema: *Sema, block: *Scope.Block, dest_type: Type, - inst: Air.Inst.Index, + inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { if (dest_type.tag() == .var_args_param) { - return sema.coerceVarArgParam(block, inst); + return sema.coerceVarArgParam(block, inst, inst_src); } + + const inst_ty = sema.getTypeOfAirRef(inst); // If the types are the same, we can return the operand. 
- if (dest_type.eql(inst.ty)) + if (dest_type.eql(inst_ty)) return inst; - const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty); + const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty); if (in_memory_result == .ok) { - return sema.bitcast(block, dest_type, inst); + return sema.bitcast(block, dest_type, inst, inst_src); } const mod = sema.mod; const arena = sema.arena; // undefined to anything - if (inst.value()) |val| { - if (val.isUndef() or inst.ty.zigTypeTag() == .Undefined) { - return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = val }); + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { + if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) { + return sema.addConstant(dest_type, val); } } - assert(inst.ty.zigTypeTag() != .Undefined); + assert(inst_ty.zigTypeTag() != .Undefined); + + if (true) { + @panic("TODO finish AIR memory layout rework"); + } // T to E!T or E to E!T if (dest_type.tag() == .error_union) { - return try sema.wrapErrorUnion(block, dest_type, inst); + return try sema.wrapErrorUnion(block, dest_type, inst, inst_src); } // comptime known number to other number @@ -6844,14 +6880,14 @@ fn coerce( switch (dest_type.zigTypeTag()) { .Optional => { // null to ?T - if (inst.ty.zigTypeTag() == .Null) { + if (inst_ty.zigTypeTag() == .Null) { return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = Value.initTag(.null_value) }); } // T to ?T var buf: Type.Payload.ElemType = undefined; const child_type = dest_type.optionalChild(&buf); - if (child_type.eql(inst.ty)) { + if (child_type.eql(inst_ty)) { return sema.wrapOptional(block, dest_type, inst); } else if (try sema.coerceNum(block, child_type, inst)) |some| { return sema.wrapOptional(block, dest_type, some); @@ -6860,12 +6896,12 @@ fn coerce( .Pointer => { // Coercions where the source is a single pointer to an array. 
src_array_ptr: { - if (!inst.ty.isSinglePointer()) break :src_array_ptr; - const array_type = inst.ty.elemType(); + if (!inst_ty.isSinglePointer()) break :src_array_ptr; + const array_type = inst_ty.elemType(); if (array_type.zigTypeTag() != .Array) break :src_array_ptr; const array_elem_type = array_type.elemType(); - if (inst.ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr; - if (inst.ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; + if (inst_ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr; + if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; const dst_elem_type = dest_type.elemType(); switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type)) { @@ -6904,11 +6940,11 @@ fn coerce( }, .Int => { // integer widening - if (inst.ty.zigTypeTag() == .Int) { + if (inst_ty.zigTypeTag() == .Int) { assert(inst.value() == null); // handled above const dst_info = dest_type.intInfo(target); - const src_info = inst.ty.intInfo(target); + const src_info = inst_ty.intInfo(target); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (src_info.signedness == .signed and dst_info.signedness == .unsigned and dst_info.bits > src_info.bits)) @@ -6920,10 +6956,10 @@ fn coerce( }, .Float => { // float widening - if (inst.ty.zigTypeTag() == .Float) { + if (inst_ty.zigTypeTag() == .Float) { assert(inst.value() == null); // handled above - const src_bits = inst.ty.floatBits(target); + const src_bits = inst_ty.floatBits(target); const dst_bits = dest_type.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src); @@ -6933,7 +6969,7 @@ fn coerce( }, .Enum => { // enum literal to enum - if (inst.ty.zigTypeTag() == .EnumLiteral) { + if (inst_ty.zigTypeTag() == .EnumLiteral) { const val = try sema.resolveConstValue(block, inst_src, inst); const bytes = val.castTag(.enum_literal).?.data; const resolved_dest_type = try sema.resolveTypeFields(block, inst_src, dest_type); @@ -6965,7 +7001,7 @@ fn coerce( else => {}, } - return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst.ty }); + return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst_ty }); } const InMemoryCoercionResult = enum { @@ -6982,7 +7018,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7020,9 +7056,15 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.I return null; } -fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index { - switch (inst.ty.zigTypeTag()) { - .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}), +fn coerceVarArgParam( + sema: *Sema, + block: *Scope.Block, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Ref { + const inst_ty = sema.getTypeOfAirRef(inst); + switch (inst_ty.zigTypeTag()) { + .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst_src, "integer 
and float literals in var args function must be casted", .{}), else => {}, } // TODO implement more of this function. @@ -7033,8 +7075,8 @@ fn storePtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: Air.Inst.Index, - uncasted_value: Air.Inst.Index, + ptr: Air.Inst.Ref, + uncasted_value: Air.Inst.Ref, ) !void { if (ptr.ty.isConstPtr()) return sema.mod.fail(&block.base, src, "cannot assign to constant", .{}); @@ -7082,17 +7124,23 @@ fn storePtr( _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); } -fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { - if (inst.value()) |val| { +fn bitcast( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) InnerError!Air.Inst.Ref { + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { // Keep the comptime Value representation; take the new type. - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } // TODO validate the type size and other compile errors - try sema.requireRuntimeBlock(block, inst.src); - return block.addUnOp(inst.src, dest_type, .bitcast, inst); + try sema.requireRuntimeBlock(block, inst_src); + return block.addTyOp(.bitcast, dest_type, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7100,7 +7148,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7108,12 +7156,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7128,43 +7176,41 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl if (decl_tv.val.tag() == .variable) { return sema.analyzeVarRef(block, src, decl_tv); } - return sema.mod.constInst(sema.arena, src, .{ - .ty = try sema.mod.simplePtrType(sema.arena, decl_tv.ty, false, .One), - .val = try Value.Tag.decl_ref.create(sema.arena, decl), - }); + return sema.addConstant( + try Module.simplePtrType(sema.arena, decl_tv.ty, false, .One), + try Value.Tag.decl_ref.create(sema.arena, decl), + ); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Ref { const variable = tv.val.castTag(.variable).?.data; - const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); + const ty = try Module.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); if (!variable.is_mutable and !variable.is_extern) { - return sema.mod.constInst(sema.arena, src, .{ - .ty = ty, - .val = try Value.Tag.ref_val.create(sema.arena, variable.init), - }); + return sema.addConstant(ty, try Value.Tag.ref_val.create(sema.arena, variable.init)); } + const gpa = sema.gpa; try sema.requireRuntimeBlock(block, src); - const inst = try sema.arena.create(Inst.VarPtr); - inst.* = .{ - .base = .{ - .tag = .varptr, - .ty = ty, - .src = src, - }, - .variable = variable, - }; - try block.instructions.append(sema.gpa, &inst.base); - return &inst.base; + try sema.air_variables.append(gpa, variable); + const result_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .varptr, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(ty), + .payload = @intCast(u32, sema.air_variables.items.len - 1), + } }, + }); + try block.instructions.append(gpa, result_inst); + return indexToRef(result_inst); } fn analyzeRef( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, -) InnerError!Air.Inst.Index { + operand: Air.Inst.Ref, +) InnerError!Air.Inst.Ref { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7182,34 +7228,32 @@ fn analyzeLoad( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: Air.Inst.Index, + ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, -) InnerError!Air.Inst.Index { - const elem_ty = switch (ptr.ty.zigTypeTag()) { - .Pointer => ptr.ty.elemType(), - else => return 
sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}), +) InnerError!Air.Inst.Ref { + const ptr_ty = sema.getTypeOfAirRef(ptr); + const elem_ty = switch (ptr_ty.zigTypeTag()) { + .Pointer => ptr_ty.elemType(), + else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr_ty}), }; if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| blk: { if (ptr_val.tag() == .int_u64) break :blk; // do it at runtime - return sema.mod.constInst(sema.arena, src, .{ - .ty = elem_ty, - .val = try ptr_val.pointerDeref(sema.arena), - }); + return sema.addConstant(elem_ty, try ptr_val.pointerDeref(sema.arena)); } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, elem_ty, .load, ptr); + return block.addTyOp(.load, elem_ty, ptr); } fn analyzeIsNull( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, invert_logic: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7228,8 +7272,8 @@ fn analyzeIsNonErr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, -) InnerError!Air.Inst.Index { + operand: Air.Inst.Ref, +) InnerError!Air.Inst.Ref { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); @@ -7249,12 +7293,12 @@ fn analyzeSlice( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - start: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + start: Air.Inst.Ref, end_opt: ?Air.Inst.Index, sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7325,10 +7369,10 @@ fn cmpNumeric( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - lhs: Air.Inst.Index, - rhs: Air.Inst.Index, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, op: std.math.CompareOperator, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { assert(lhs.ty.isNumeric()); assert(rhs.ty.isNumeric()); @@ -7494,7 +7538,7 @@ fn cmpNumeric( return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } -fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index { if (inst.value()) |val| { return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); } @@ -7503,9 +7547,15 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Ins return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); } -fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn wrapErrorUnion( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Index { const err_union = dest_type.castTag(.error_union).?; - if (inst.value()) |val| { + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { if (inst.ty.zigTypeTag() != .ErrorSet) { _ = try sema.coerce(block, err_union.data.payload, inst, 
inst.src); } else switch (err_union.data.error_set.tag()) { @@ -7710,7 +7760,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; @@ -7938,6 +7988,68 @@ fn enumFieldSrcLoc( } else unreachable; } +/// Returns the type of the AIR instruction. +fn getTypeOfAirRef(sema: *Sema, air_ref: Air.Inst.Ref) Type { + switch (air_ref) { + .none => unreachable, + .u8_type => return Type.initTag(.u8), + .i8_type => return Type.initTag(.i8), + .u16_type => return Type.initTag(.u16), + .i16_type => return Type.initTag(.i16), + .u32_type => return Type.initTag(.u32), + .i32_type => return Type.initTag(.i32), + .u64_type => return Type.initTag(.u64), + .i64_type => return Type.initTag(.i64), + .u128_type => return Type.initTag(.u128), + .i128_type => return Type.initTag(.i128), + .usize_type => return Type.initTag(.usize), + .isize_type => return Type.initTag(.isize), + .c_short_type => return Type.initTag(.c_short), + .c_ushort_type => return Type.initTag(.c_ushort), + .c_int_type => return Type.initTag(.c_int), + .c_uint_type => return Type.initTag(.c_uint), + .c_long_type => return Type.initTag(.c_long), + .c_ulong_type => return Type.initTag(.c_ulong), + .c_longlong_type => return Type.initTag(.c_longlong), + .c_ulonglong_type => return Type.initTag(.c_ulonglong), + .c_longdouble_type => return Type.initTag(.c_longdouble), + .f16_type => return Type.initTag(.f16), + .f32_type => return Type.initTag(.f32), + .f64_type => return Type.initTag(.f64), + .f128_type => return Type.initTag(.f128), + .c_void_type => return Type.initTag(.c_void), + .bool_type => return Type.initTag(.bool), + .void_type => return Type.initTag(.void), + .type_type => return Type.initTag(.type), + .anyerror_type => return Type.initTag(.anyerror), + .comptime_int_type => return Type.initTag(.comptime_int), + .comptime_float_type => return Type.initTag(.comptime_float), + .noreturn_type => return Type.initTag(.noreturn), + .anyframe_type => return Type.initTag(.@"anyframe"), + .null_type => return Type.initTag(.@"null"), + .undefined_type => return Type.initTag(.@"undefined"), + .enum_literal_type => return Type.initTag(.enum_literal), + .atomic_ordering_type => return Type.initTag(.atomic_ordering), + .atomic_rmw_op_type => return Type.initTag(.atomic_rmw_op), + .calling_convention_type => return Type.initTag(.calling_convention), + .float_mode_type => return Type.initTag(.float_mode), + .reduce_op_type => return Type.initTag(.reduce_op), + .call_options_type => return Type.initTag(.call_options), + .export_options_type => return Type.initTag(.export_options), + .extern_options_type => return Type.initTag(.extern_options), + .manyptr_u8_type => return Type.initTag(.manyptr_u8), + .manyptr_const_u8_type => return Type.initTag(.manyptr_const_u8), + .fn_noreturn_no_args_type => return Type.initTag(.fn_noreturn_no_args), + .fn_void_no_args_type => return Type.initTag(.fn_void_no_args), + .fn_naked_noreturn_no_args_type => return Type.initTag(.fn_naked_noreturn_no_args), + .fn_ccc_void_no_args_type => return Type.initTag(.fn_ccc_void_no_args), + .single_const_pointer_to_comptime_int_type => return Type.initTag(.single_const_pointer_to_comptime_int), + .const_slice_u8_type => return Type.initTag(.const_slice_u8), + else => return sema.getAirType(air_ref), + } +} + +/// Asserts the AIR instruction is a `const_ty` and returns 
the type. fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { var i: usize = @enumToInt(air_ref); if (i < Air.Inst.Ref.typed_value_map.len) { @@ -8014,13 +8126,27 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } +pub fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { + const gpa = sema.gpa; + const ty_inst = try sema.addType(ty); + try sema.air_values.append(gpa, val); + try sema.air_instructions.append(gpa, .{ + .tag = .constant, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); +} + const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; -fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { +pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { return @intToEnum(Air.Inst.Ref, ref_start_index + inst); } -fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { +pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { const ref_int = @enumToInt(inst); if (ref_int >= ref_start_index) { return ref_int - ref_start_index; diff --git a/src/codegen.zig b/src/codegen.zig index a6c4b5ad3c..c27a1444ef 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -494,7 +494,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { defer function.blocks.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); - var call_info = function.resolveCallingConventionValues(src_loc.lazy, fn_type) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, else => |e| return e, }; @@ -537,7 +537,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.items.len += 4; try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); const stack_end = self.max_end_stack; if (stack_end > math.maxInt(i32)) @@ -578,7 +578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, @@ -758,11 +758,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // TODO inline this logic into every instruction - var i: ir.Inst.DeathsBitIndex = 0; - while (inst.getOperand(i)) |operand| : (i += 1) { - if (inst.operandDies(i)) - self.processDeath(operand); - } + @panic("TODO rework AIR memory layout codegen for processing deaths"); + //var i: ir.Inst.DeathsBitIndex = 0; + //while (inst.getOperand(i)) |operand| : (i += 1) { + // if (inst.operandDies(i)) + // self.processDeath(operand); + //} } } @@ -858,74 +859,76 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const air_tags = self.air.instructions.items(.tag); switch (air_tags[inst]) { // zig fmt: off - .add => return self.genAdd(inst.castTag(.add).?), - .addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - .sub => return self.genSub(inst.castTag(.sub).?), - .subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - .mul => return self.genMul(inst.castTag(.mul).?), - .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - .div => return self.genDiv(inst.castTag(.div).?), - - .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_eq => return 
self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), - - .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - .xor => return self.genXor(inst.castTag(.xor).?), - - .alloc => return self.genAlloc(inst.castTag(.alloc).?), - .arg => return self.genArg(inst.castTag(.arg).?), - .assembly => return self.genAsm(inst.castTag(.assembly).?), - .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - .block => return self.genBlock(inst.castTag(.block).?), - .br => return self.genBr(inst.castTag(.br).?), - .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - .breakpoint => return self.genBreakpoint(inst.src), - .call => return self.genCall(inst.castTag(.call).?), - .cond_br => return self.genCondBr(inst.castTag(.condbr).?), - .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - .intcast => return self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), - .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - .is_null => return self.genIsNull(inst.castTag(.is_null).?), - .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - .is_err => return self.genIsErr(inst.castTag(.is_err).?), - .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - .load => return self.genLoad(inst.castTag(.load).?), - .loop => return self.genLoop(inst.castTag(.loop).?), - .not => return self.genNot(inst.castTag(.not).?), - .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - .ref => return self.genRef(inst.castTag(.ref).?), - .ret => return self.genRet(inst.castTag(.ret).?), - .store => return self.genStore(inst.castTag(.store).?), - .struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .switchbr => return self.genSwitch(inst.castTag(.switchbr).?), - .varptr => return self.genVarPtr(inst.castTag(.varptr).?), - - .constant => unreachable, // excluded from function bodies - .unreach => return MCValue{ .unreach = {} }, - - .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - .unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - - .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), - .wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), + //.add => return 
self.genAdd(inst.castTag(.add).?), + //.addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), + //.sub => return self.genSub(inst.castTag(.sub).?), + //.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), + //.mul => return self.genMul(inst.castTag(.mul).?), + //.mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), + //.div => return self.genDiv(inst.castTag(.div).?), + + //.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), + //.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), + //.cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), + //.cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), + //.cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), + //.cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), + + //.bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), + //.bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), + //.bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), + //.bit_or => return self.genBitOr(inst.castTag(.bit_or).?), + //.xor => return self.genXor(inst.castTag(.xor).?), + + //.alloc => return self.genAlloc(inst.castTag(.alloc).?), + //.arg => return self.genArg(inst.castTag(.arg).?), + //.assembly => return self.genAsm(inst.castTag(.assembly).?), + //.bitcast => return self.genBitCast(inst.castTag(.bitcast).?), + //.block => return self.genBlock(inst.castTag(.block).?), + //.br => return self.genBr(inst.castTag(.br).?), + //.br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), + //.breakpoint => return self.genBreakpoint(inst.src), + //.call => return self.genCall(inst.castTag(.call).?), + //.cond_br => return self.genCondBr(inst.castTag(.condbr).?), + //.dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), + //.floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), + //.intcast => return self.genIntCast(inst.castTag(.intcast).?), + //.is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), + //.is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), + //.is_null => return self.genIsNull(inst.castTag(.is_null).?), + //.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), + //.is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), + //.is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), + //.is_err => return self.genIsErr(inst.castTag(.is_err).?), + //.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), + //.load => return self.genLoad(inst.castTag(.load).?), + //.loop => return self.genLoop(inst.castTag(.loop).?), + //.not => return self.genNot(inst.castTag(.not).?), + //.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), + //.ref => return self.genRef(inst.castTag(.ref).?), + //.ret => return self.genRet(inst.castTag(.ret).?), + //.store => return self.genStore(inst.castTag(.store).?), + //.struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + //.switch_br => return self.genSwitch(inst.castTag(.switchbr).?), + //.varptr => return self.genVarPtr(inst.castTag(.varptr).?), + + //.constant => unreachable, // excluded from function bodies + //.unreach => return MCValue{ .unreach = {} }, + + //.optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), + //.optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), + //.unwrap_errunion_err => return 
self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), + //.unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), + + //.wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), + //.wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), // zig fmt: on + + else => @panic("TODO finish air memory layout branch, more codegen.zig instructions"), } } @@ -4785,14 +4788,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; } - fn fail(self: *Self, src: LazySrcLoc, comptime format: []const u8, args: anytype) InnerError { + fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { @setCold(true); assert(self.err_msg == null); - const src_loc = if (src != .unneeded) - src.toSrcLocWithDecl(self.mod_fn.owner_decl) - else - self.src_loc; - self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src_loc, format, args); + self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args); return error.CodegenFail; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4743494f35..0ee6972654 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -25,7 +25,7 @@ pub const CValue = union(enum) { /// Index into local_names, but take the address. local_ref: usize, /// A constant instruction, to be rendered inline. - constant: *Inst, + constant: Air.Inst.Index, /// Index into the parameters arg: usize, /// By-value @@ -99,7 +99,7 @@ pub const Object = struct { gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, - blocks: std.AutoHashMapUnmanaged(*ir.Inst.Block, BlockData) = .{}, + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, next_arg_index: usize = 0, next_local_index: usize = 0, next_block_index: usize = 0, @@ -133,7 +133,12 @@ pub const Object = struct { .none => unreachable, .local => |i| return w.print("t{d}", .{i}), .local_ref => |i| return w.print("&t{d}", .{i}), - .constant => |inst| return o.dg.renderValue(w, inst.ty, inst.value().?), + .constant => |inst| { + const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; + const ty = o.air.getRefType(ty_pl.ty); + const val = o.air.values[ty_pl.payload]; + return o.dg.renderValue(w, ty, val); + }, .arg => |i| return w.print("a{d}", .{i}), .decl => |decl| return w.writeAll(mem.span(decl.name)), .decl_ref => |decl| return w.print("&{s}", .{decl.name}), @@ -213,8 +218,9 @@ pub const DeclGen = struct { error_msg: ?*Module.ErrorMsg, typedefs: TypedefMap, - fn fail(dg: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { + fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(dg.decl); dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args); return error.AnalysisFail; @@ -230,7 +236,7 @@ pub const DeclGen = struct { // This should lower to 0xaa bytes in safe modes, and for unsafe modes should // lower to leaving variables uninitialized (that might need to be implemented // outside of this function). 
- return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement renderValue undef", .{}); + return dg.fail("TODO: C backend: implement renderValue undef", .{}); } switch (t.zigTypeTag()) { .Int => { @@ -440,7 +446,7 @@ pub const DeclGen = struct { }, else => unreachable, }, - else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{ + else => |e| return dg.fail("TODO: C backend: implement value {s}", .{ @tagName(e), }), } @@ -519,14 +525,14 @@ pub const DeclGen = struct { break; } } else { - return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement integer types larger than 128 bits", .{}); + return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); } }, else => unreachable, } }, - .Float => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Float", .{}), + .Float => return dg.fail("TODO: C backend: implement type Float", .{}), .Pointer => { if (t.isSlice()) { @@ -681,7 +687,7 @@ pub const DeclGen = struct { try dg.renderType(w, int_tag_ty); }, - .Union => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Union", .{}), + .Union => return dg.fail("TODO: C backend: implement type Union", .{}), .Fn => { try dg.renderType(w, t.fnReturnType()); try w.writeAll(" (*)("); @@ -704,10 +710,10 @@ pub const DeclGen = struct { } try w.writeByte(')'); }, - .Opaque => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Opaque", .{}), - .Frame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Frame", .{}), - .AnyFrame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type AnyFrame", .{}), - .Vector => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Vector", .{}), + .Opaque => return dg.fail("TODO: C backend: implement type Opaque", .{}), + .Frame => return dg.fail("TODO: C backend: implement type Frame", .{}), + .AnyFrame => return dg.fail("TODO: C backend: implement type AnyFrame", .{}), + .Vector => return dg.fail("TODO: C backend: implement type Vector", .{}), .Null, .Undefined, @@ -760,7 +766,8 @@ pub fn genDecl(o: *Object) !void { try o.dg.renderFunctionSignature(o.writer(), is_global); try o.writer().writeByte(' '); - try genBody(o, func.body); + const main_body = o.air.getMainBody(); + try genBody(o, main_body); try o.indent_writer.insertNewline(); return; @@ -833,9 +840,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { } } -pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!void { +fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { const writer = o.writer(); - if (body.instructions.len == 0) { + if (body.len == 0) { try writer.writeAll("{}"); return; } @@ -843,82 +850,85 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi try writer.writeAll("{\n"); o.indent_writer.pushIndent(); - for (body.instructions) |inst| { - const result_value = switch (inst.tag) { - // TODO use a different strategy for add that communicates to the optimizer - // that wrapping is UB. - .add => try genBinOp(o, inst.castTag(.add).?, " + "), - .addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), - // TODO use a different strategy for sub that communicates to the optimizer - // that wrapping is UB. 
- .sub => try genBinOp(o, inst.castTag(.sub).?, " - "), - .subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), - // TODO use a different strategy for mul that communicates to the optimizer - // that wrapping is UB. - .mul => try genBinOp(o, inst.castTag(.sub).?, " * "), - .mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), - // TODO use a different strategy for div that communicates to the optimizer - // that wrapping is UB. - .div => try genBinOp(o, inst.castTag(.div).?, " / "), - - .constant => unreachable, // excluded from function bodies - .alloc => try genAlloc(o, inst.castTag(.alloc).?), - .arg => genArg(o), - .assembly => try genAsm(o, inst.castTag(.assembly).?), - .block => try genBlock(o, inst.castTag(.block).?), - .bitcast => try genBitcast(o, inst.castTag(.bitcast).?), - .breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), - .call => try genCall(o, inst.castTag(.call).?), - .cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), - .cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), - .cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), - .cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), - .cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), - .cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), - .dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), - .intcast => try genIntCast(o, inst.castTag(.intcast).?), - .load => try genLoad(o, inst.castTag(.load).?), - .ret => try genRet(o, inst.castTag(.ret).?), - .retvoid => try genRetVoid(o), - .store => try genStore(o, inst.castTag(.store).?), - .unreach => try genUnreach(o, inst.castTag(.unreach).?), - .loop => try genLoop(o, inst.castTag(.loop).?), - .condbr => try genCondBr(o, inst.castTag(.condbr).?), - .br => try genBr(o, inst.castTag(.br).?), - .br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), - .switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), - // bool_and and bool_or are non-short-circuit operations - .bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), - .bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), - .bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "), - .bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), - .xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), - .not => try genUnOp(o, inst.castTag(.not).?, "!"), - .is_null => try genIsNull(o, inst.castTag(.is_null).?), - .is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), - .is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), - .is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), - .wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), - .optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), - .optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), - .ref => try genRef(o, inst.castTag(.ref).?), - .struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), - - .is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), - .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), - .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), - .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), - - .unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), 
- .unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), - .unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), - .wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), - .br_block_flat => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for br_block_flat", .{}), - .ptrtoint => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for ptrtoint", .{}), - .varptr => try genVarPtr(o, inst.castTag(.varptr).?), - .floatcast => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for floatcast", .{}), + const air_tags = o.air.instructions.items(.tag); + + for (body) |inst| { + const result_value = switch (air_tags[inst]) { + //// TODO use a different strategy for add that communicates to the optimizer + //// that wrapping is UB. + //.add => try genBinOp(o, inst.castTag(.add).?, " + "), + //.addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), + //// TODO use a different strategy for sub that communicates to the optimizer + //// that wrapping is UB. + //.sub => try genBinOp(o, inst.castTag(.sub).?, " - "), + //.subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), + //// TODO use a different strategy for mul that communicates to the optimizer + //// that wrapping is UB. + //.mul => try genBinOp(o, inst.castTag(.sub).?, " * "), + //.mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), + //// TODO use a different strategy for div that communicates to the optimizer + //// that wrapping is UB. + //.div => try genBinOp(o, inst.castTag(.div).?, " / "), + + //.constant => unreachable, // excluded from function bodies + //.alloc => try genAlloc(o, inst.castTag(.alloc).?), + //.arg => genArg(o), + //.assembly => try genAsm(o, inst.castTag(.assembly).?), + //.block => try genBlock(o, inst.castTag(.block).?), + //.bitcast => try genBitcast(o, inst.castTag(.bitcast).?), + //.breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), + //.call => try genCall(o, inst.castTag(.call).?), + //.cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), + //.cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), + //.cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), + //.cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), + //.cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), + //.cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), + //.dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), + //.intcast => try genIntCast(o, inst.castTag(.intcast).?), + //.load => try genLoad(o, inst.castTag(.load).?), + //.ret => try genRet(o, inst.castTag(.ret).?), + //.retvoid => try genRetVoid(o), + //.store => try genStore(o, inst.castTag(.store).?), + //.unreach => try genUnreach(o, inst.castTag(.unreach).?), + //.loop => try genLoop(o, inst.castTag(.loop).?), + //.condbr => try genCondBr(o, inst.castTag(.condbr).?), + //.br => try genBr(o, inst.castTag(.br).?), + //.br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), + //.switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), + //// bool_and and bool_or are non-short-circuit operations + //.bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), + //.bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), + //.bit_and => try genBinOp(o, 
inst.castTag(.bit_and).?, " & "), + //.bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), + //.xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), + //.not => try genUnOp(o, inst.castTag(.not).?, "!"), + //.is_null => try genIsNull(o, inst.castTag(.is_null).?), + //.is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), + //.is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), + //.is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), + //.wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), + //.optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), + //.optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), + //.ref => try genRef(o, inst.castTag(.ref).?), + //.struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), + + //.is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), + //.is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), + //.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), + //.is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), + + //.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), + //.unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), + //.wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), + //.br_block_flat => return o.dg.fail("TODO: C backend: implement codegen for br_block_flat", .{}), + //.ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}), + //.varptr => try genVarPtr(o, inst.castTag(.varptr).?), + //.floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}), + else => return o.dg.fail("TODO: C backend: rework AIR memory layout", .{}), }; switch (result_value) { .none => {}, @@ -1060,7 +1070,7 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c } if (bits > 64) { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: genWrapOp for large integers", .{}); + return o.dg.fail("TODO: C backend: genWrapOp for large integers", .{}); } var min_buf: [80]u8 = undefined; @@ -1227,7 +1237,7 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue { try writer.writeAll(");\n"); return result_local; } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement function pointers", .{}); + return o.dg.fail("TODO: C backend: implement function pointers", .{}); } } @@ -1390,13 +1400,13 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { try o.writeCValue(writer, arg_c_value); try writer.writeAll(";\n"); } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO non-explicit inline asm regs", .{}); + return o.dg.fail("TODO non-explicit inline asm regs", .{}); } } const volatile_string: []const u8 = if (as.is_volatile) "volatile " else ""; try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source }); if (as.output_constraint) |_| { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: CBE inline asm output", .{}); + return o.dg.fail("TODO: CBE inline asm output", .{}); } if (as.inputs.len > 0) { if 
(as.output_constraint == null) { @@ -1421,7 +1431,7 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { if (as.base.isUnused()) return CValue.none; - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: inline asm expression result used", .{}); + return o.dg.fail("TODO: C backend: inline asm expression result used", .{}); } fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 0d05b97846..c93f04f618 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2519,6 +2519,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); defer dbg_info_buffer.deinit(); diff --git a/src/value.zig b/src/value.zig index 48cd6fffc4..0f7194d8c1 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1700,7 +1700,7 @@ pub const Value = extern union { /// peer type resolution. This is stored in a separate list so that /// the items are contiguous in memory and thus can be passed to /// `Module.resolvePeerTypes`. - stored_inst_list: std.ArrayListUnmanaged(*ir.Inst) = .{}, + stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Index) = .{}, }, };
-- cgit v1.2.3

From 3c5927fb87034affd6af56ecd5d9ae07fe23d690 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 14 Jul 2021 12:16:48 -0700
Subject: Sema: add a strategy for handling costly source locations

Now you can pass `.unneeded` for a `LazySrcLoc` and if there ended up being a compile error that needed it, you'll get `error.NeededSourceLocation`. Callsites can now exploit this error to do the expensive computation to produce a source location object and then repeat the operation.
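As a rough illustration of the callsite pattern this enables (not actual compiler code: `checkPositive` and `resolveExpensiveSrc` are made-up stand-ins, while `LazySrcLoc`, `.unneeded`, and `error.NeededSourceLocation` mirror the names used in the diff below), a caller first tries the cheap path and only computes a real source location when the operation reports that it needed one:

    const std = @import("std");

    // Made-up stand-ins for this sketch; the real compiler has richer
    // Module.LazySrcLoc and Module.CompileError definitions.
    const LazySrcLoc = union(enum) {
        unneeded,
        node_offset: i32,
    };

    const CompileError = error{ OutOfMemory, AnalysisFail, NeededSourceLocation };

    /// Pretend analysis step: it only needs a source location when it
    /// has to report a compile error.
    fn checkPositive(x: i32, src: LazySrcLoc) CompileError!void {
        if (x >= 0) return; // success path: the source location is never touched
        switch (src) {
            // Refuse to report without a real location; let the caller
            // decide whether to pay for computing one.
            .unneeded => return error.NeededSourceLocation,
            .node_offset => |off| {
                std.debug.print("error at node offset {d}: expected a positive value\n", .{off});
                return error.AnalysisFail;
            },
        }
    }

    /// Pretend expensive computation of a precise source location.
    fn resolveExpensiveSrc() LazySrcLoc {
        return .{ .node_offset = 42 };
    }

    pub fn main() void {
        // Cheap attempt first; recompute and retry only if an error
        // message actually needed the location.
        checkPositive(-5, .unneeded) catch |err| switch (err) {
            error.NeededSourceLocation => {
                const src = resolveExpensiveSrc();
                checkPositive(-5, src) catch {};
            },
            else => {},
        };
    }

The point of the design is that the common, non-failing path never pays for building a source location object; only the error path repeats the operation after doing the expensive resolution.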
--- src/Compilation.zig | 6 +- src/Module.zig | 32 +-- src/Sema.zig | 585 ++++++++++++++++++++++++++-------------------------- 3 files changed, 317 insertions(+), 306 deletions(-) (limited to 'src/Compilation.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 4a442a8b67..f241ae6b10 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -148,7 +148,7 @@ emit_docs: ?EmitLoc, work_queue_wait_group: WaitGroup, astgen_wait_group: WaitGroup, -pub const InnerError = Module.InnerError; +pub const SemaError = Module.SemaError; pub const CRTFile = struct { lock: Cache.Lock, @@ -3170,7 +3170,7 @@ pub fn addCCArgs( try argv.appendSlice(comp.clang_argv); } -fn failCObj(comp: *Compilation, c_object: *CObject, comptime format: []const u8, args: anytype) InnerError { +fn failCObj(comp: *Compilation, c_object: *CObject, comptime format: []const u8, args: anytype) SemaError { @setCold(true); const err_msg = blk: { const msg = try std.fmt.allocPrint(comp.gpa, format, args); @@ -3191,7 +3191,7 @@ fn failCObjWithOwnedErrorMsg( comp: *Compilation, c_object: *CObject, err_msg: *CObject.ErrorMsg, -) InnerError { +) SemaError { @setCold(true); { const lock = comp.mutex.acquire(); diff --git a/src/Module.zig b/src/Module.zig index 3ce3c47f14..0a082313b3 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1996,7 +1996,8 @@ pub const LazySrcLoc = union(enum) { } }; -pub const InnerError = error{ OutOfMemory, AnalysisFail }; +pub const SemaError = error{ OutOfMemory, AnalysisFail }; +pub const CompileError = error{ OutOfMemory, AnalysisFail, NeededSourceLocation }; pub fn deinit(mod: *Module) void { const gpa = mod.gpa; @@ -2635,7 +2636,7 @@ pub fn mapOldZirToNew( } } -pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void { +pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2735,7 +2736,7 @@ pub fn semaPkg(mod: *Module, pkg: *Package) !void { /// Regardless of the file status, will create a `Decl` so that we /// can track dependencies and re-analyze when the file becomes outdated. 
-pub fn semaFile(mod: *Module, file: *Scope.File) InnerError!void { +pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -3150,7 +3151,7 @@ pub fn scanNamespace( extra_start: usize, decls_len: u32, parent_decl: *Decl, -) InnerError!usize { +) SemaError!usize { const tracy = trace(@src()); defer tracy.end(); @@ -3197,7 +3198,7 @@ const ScanDeclIter = struct { unnamed_test_index: usize = 0, }; -fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) InnerError!void { +fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -3451,7 +3452,7 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { mod.gpa.free(kv.value); } -pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { +pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); @@ -3804,7 +3805,7 @@ pub fn fail( src: LazySrcLoc, comptime format: []const u8, args: anytype, -) InnerError { +) CompileError { const err_msg = try mod.errMsg(scope, src, format, args); return mod.failWithOwnedErrorMsg(scope, err_msg); } @@ -3817,7 +3818,7 @@ pub fn failTok( token_index: ast.TokenIndex, comptime format: []const u8, args: anytype, -) InnerError { +) CompileError { const src = scope.srcDecl().?.tokSrcLoc(token_index); return mod.fail(scope, src, format, args); } @@ -3830,18 +3831,21 @@ pub fn failNode( node_index: ast.Node.Index, comptime format: []const u8, args: anytype, -) InnerError { +) CompileError { const src = scope.srcDecl().?.nodeSrcLoc(node_index); return mod.fail(scope, src, format, args); } -pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) InnerError { +pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) CompileError { @setCold(true); { errdefer err_msg.destroy(mod.gpa); - try mod.failed_decls.ensureCapacity(mod.gpa, mod.failed_decls.count() + 1); - try mod.failed_files.ensureCapacity(mod.gpa, mod.failed_files.count() + 1); + if (err_msg.src_loc.lazy == .unneeded) { + return error.NeededSourceLocation; + } + try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); + try mod.failed_files.ensureUnusedCapacity(mod.gpa, 1); } switch (scope.tag) { .block => { @@ -4340,7 +4344,7 @@ pub const SwitchProngSrc = union(enum) { } }; -pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) InnerError!void { +pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -4490,7 +4494,7 @@ pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) InnerError!void { } } -pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) InnerError!void { +pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) CompileError!void { const tracy = trace(@src()); defer tracy.end(); diff --git a/src/Sema.zig b/src/Sema.zig index 829dd843cc..91f81ffeed 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -61,7 +61,8 @@ const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); const trace = @import("tracy.zig").trace; const Scope = Module.Scope; -const InnerError = Module.InnerError; +const CompileError = Module.CompileError; +const SemaError = Module.SemaError; const Decl = Module.Decl; const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); @@ -83,7 +84,7 @@ pub fn analyzeFnBody( sema: *Sema, block: *Scope.Block, fn_body_inst: Zir.Inst.Index, -) InnerError!void { +) 
SemaError!void { const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); const body: []const Zir.Inst.Index = switch (tags[fn_body_inst]) { @@ -109,13 +110,16 @@ pub fn analyzeFnBody( }, else => unreachable, }; - _ = try sema.analyzeBody(block, body); + _ = sema.analyzeBody(block, body) catch |err| switch (err) { + error.NeededSourceLocation => unreachable, + else => |e| return e, + }; } /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. -fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) CompileError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -125,7 +129,7 @@ fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) I /// return type of `analyzeBody` so that we can tail call them. /// Only appropriate to return when the instruction is known to be NoReturn /// solely based on the ZIR tag. -const always_noreturn: InnerError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined); +const always_noreturn: CompileError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined); /// This function is the main loop of `Sema` and it can be used in two different ways: /// * The traditional way where there are N breaks out of the block and peer type @@ -140,7 +144,7 @@ pub fn analyzeBody( sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { // No tracy calls here, to avoid interfering with the tail call mechanism. 
const map = &block.sema.inst_map; @@ -541,7 +545,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -638,7 +642,7 @@ fn resolveConstValue( block: *Scope.Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, -) !Value { +) CompileError!Value { return (try sema.resolveDefinedValue(block, src, air_ref)) orelse return sema.failWithNeededComptime(block, src); } @@ -648,7 +652,7 @@ fn resolveDefinedValue( block: *Scope.Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, -) !?Value { +) CompileError!?Value { if (try sema.resolvePossiblyUndefinedValue(block, src, air_ref)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); @@ -663,7 +667,7 @@ fn resolvePossiblyUndefinedValue( block: *Scope.Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, -) !?Value { +) CompileError!?Value { const ty = sema.getTypeOf(air_ref); if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; @@ -687,11 +691,11 @@ fn resolvePossiblyUndefinedValue( } } -fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { +fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) CompileError { return sema.mod.fail(&block.base, src, "unable to resolve comptime value", .{}); } -fn failWithUseOfUndef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { +fn failWithUseOfUndef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) CompileError { return sema.mod.fail(&block.base, src, "use of undefined value here causes undefined behavior", .{}); } @@ -733,7 +737,7 @@ pub fn resolveInstConst( block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, -) InnerError!TypedValue { +) CompileError!TypedValue { const air_ref = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ @@ -742,13 +746,13 @@ pub fn resolveInstConst( }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -760,7 +764,7 @@ pub fn analyzeStructDecl( new_decl: *Decl, inst: Zir.Inst.Index, struct_obj: *Module.Struct, -) InnerError!void { +) SemaError!void { const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -783,7 +787,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -854,7 +858,7 @@ fn zirEnumDecl( sema: 
*Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1051,7 +1055,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1115,7 +1119,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1135,7 +1139,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1175,7 +1179,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1187,7 +1191,7 @@ fn zirRetPtr( return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1200,7 +1204,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1211,7 +1215,7 @@ fn zirRetType( return sema.addType(ret_type); } -fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1227,14 +1231,14 @@ fn ensureResultUsed( block: *Scope.Block, operand: Air.Inst.Ref, src: LazySrcLoc, -) InnerError!void { +) CompileError!void { switch (operand.ty.zigTypeTag()) { .Void, .NoReturn => return, else => return sema.mod.fail(&block.base, src, "expression value is ignored", .{}), } } -fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1247,7 +1251,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1281,7 +1285,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1304,13 +1308,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = 
sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1333,13 +1337,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1352,7 +1356,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1371,7 +1375,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1395,7 +1399,7 @@ fn zirAllocInferred( return result; } -fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1421,7 +1425,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde ptr.tag = .alloc; } -fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1494,7 +1498,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind } } -fn zirValidateArrayInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirValidateArrayInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement Sema.zirValidateArrayInitPtr", .{}); @@ -1506,7 +1510,7 @@ fn failWithBadFieldAccess( struct_obj: *Module.Struct, field_src: LazySrcLoc, field_name: []const u8, -) InnerError { +) CompileError { const mod = sema.mod; const gpa = sema.gpa; @@ -1533,7 +1537,7 @@ fn failWithBadUnionFieldAccess( union_obj: *Module.Union, field_src: LazySrcLoc, field_name: []const u8, -) InnerError { +) CompileError { const mod = sema.mod; const gpa = sema.gpa; @@ -1554,7 +1558,7 @@ fn failWithBadUnionFieldAccess( return 
mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1575,7 +1579,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.storePtr(block, src, bitcasted_ptr, value); } -fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1594,7 +1598,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.storePtr(block, src, bitcasted_ptr, value); } -fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const quota = try sema.resolveAlreadyCoercedInt(block, src, inst_data.operand, u32); @@ -1602,7 +1606,7 @@ fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) sema.branch_quota = quota; } -fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1612,7 +1616,7 @@ fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!v return sema.storePtr(block, sema.src, ptr, value); } -fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1624,7 +1628,7 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1660,7 +1664,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.addType(param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1688,7 +1692,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1697,7 +1701,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.addIntUnsigned(Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const 
tracy = trace(@src()); defer tracy.end(); @@ -1715,7 +1719,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! }); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1728,7 +1732,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1742,7 +1746,7 @@ fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro }); } -fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1757,7 +1761,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1789,7 +1793,7 @@ fn zirCompileLog( }); } -fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1799,7 +1803,7 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return always_noreturn; } -fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); const msg_inst = sema.resolveInst(inst_data.operand); @@ -1807,7 +1811,7 @@ fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Z return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1872,7 +1876,7 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1882,13 +1886,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1946,7 +1950,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1957,7 +1961,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2033,7 +2037,7 @@ fn analyzeBlockBody( return &merges.block_inst.base; } -fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2069,13 +2073,13 @@ fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
try sema.mod.analyzeExport(&block.base, src, export_name, decl); } -fn zirSetAlignStack(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetAlignStack(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirSetAlignStack", .{}); } -fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand); @@ -2083,19 +2087,19 @@ fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError func.is_cold = is_cold; } -fn zirSetFloatMode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetFloatMode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirSetFloatMode", .{}); } -fn zirSetRuntimeSafety(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetRuntimeSafety(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand); } -fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2105,13 +2109,13 @@ fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint); } -fn zirFence(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirFence(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirFence", .{}); } -fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2151,7 +2155,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerE } } -fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2165,7 +2169,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2173,7 +2177,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2199,7 +2203,7 @@ fn lookupInNamespace( sema: *Sema, namespace: *Scope.Namespace, ident_name: []const u8, -) InnerError!?*Decl { +) CompileError!?*Decl { const namespace_decl = namespace.getDecl(); if (namespace_decl.analysis == .file_failure) { try sema.mod.declareDeclDependency(sema.owner_decl, namespace_decl); @@ -2227,7 +2231,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2257,7 +2261,7 @@ fn analyzeCall( modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, args: []const Air.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2412,7 +2416,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2423,7 +2427,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.addType(ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2435,7 +2439,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.addType(opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2443,7 +2447,7 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.addType(elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -2457,7 +2461,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.addType(vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn 
zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2470,7 +2474,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.addType(array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2485,7 +2489,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.addType(array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2497,7 +2501,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.addType(anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2517,7 +2521,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.addType(err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2536,7 +2540,7 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2566,7 +2570,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addTyOp(.bitcast, result_ty, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2599,7 +2603,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addTyOp(.bitcast, Type.initTag(.anyerror), op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2689,7 +2693,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2703,7 +2707,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirEnumToInt(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; @@ -2741,7 +2745,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr }); } - if (enum_tag.value()) |enum_tag_val| { + if (try sema.resolvePossiblyUndefinedValue(block, operand_src, enum_tag)) |enum_tag_val| { if (enum_tag_val.castTag(.enum_field_index)) |enum_field_payload| { const field_index = enum_field_payload.data; switch (enum_tag.ty.tag()) { @@ -2785,7 +2789,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addTyOp(.bitcast, int_tag_ty, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2801,16 +2805,16 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return mod.fail(&block.base, dest_ty_src, "expected enum, found {}", .{dest_ty}); } - if (dest_ty.isNonexhaustiveEnum()) { - if (operand.value()) |int_val| { + if (try sema.resolvePossiblyUndefinedValue(block, operand_src, operand)) |int_val| { + if (dest_ty.isNonexhaustiveEnum()) { return mod.constInst(arena, src, .{ .ty = dest_ty, .val = int_val, }); } - } - - if (try sema.resolveDefinedValue(block, operand_src, operand)) |int_val| { + if (int_val.isUndef()) { + return sema.failWithUseOfUndef(block, operand_src); + } if (!dest_ty.enumHasInt(int_val, target)) { const msg = msg: { const msg = try mod.errMsg( @@ -2846,7 +2850,7 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2863,7 +2867,7 @@ fn zirOptionalPayloadPtr( const child_type = try opt_type.optionalChildAlloc(sema.arena); const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); - if (optional_ptr.value()) |pointer_val| { + if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); if (val.isNull()) { return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); @@ -2889,7 +2893,7 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2903,7 +2907,7 @@ fn zirOptionalPayload( const child_type = try opt_type.optionalChildAlloc(sema.arena); - if (operand.value()) |val| { + if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.isNull()) { return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); } @@ -2927,7 +2931,7 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2937,7 +2941,7 @@ fn zirErrUnionPayload( if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); - if (operand.value()) |val| { + if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.getError()) |name| { return sema.mod.fail(&block.base, src, "caught unexpected error '{s}'", 
.{name}); } @@ -2962,7 +2966,7 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2976,7 +2980,7 @@ fn zirErrUnionPayloadPtr( const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); - if (operand.value()) |pointer_val| { + if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); if (val.getError()) |name| { return sema.mod.fail(&block.base, src, "caught unexpected error '{s}'", .{name}); @@ -3001,7 +3005,7 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3013,7 +3017,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner const result_ty = operand.ty.castTag(.error_union).?.data.error_set; - if (operand.value()) |val| { + if (try sema.resolveDefinedValue(block, src, operand)) |val| { assert(val.getError() != null); const data = val.castTag(.error_union).?.data; return sema.mod.constInst(sema.arena, src, .{ @@ -3027,7 +3031,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3041,7 +3045,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In const result_ty = operand.ty.elemType().castTag(.error_union).?.data.error_set; - if (operand.value()) |pointer_val| { + if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); assert(val.getError() != null); const data = val.castTag(.error_union).?.data; @@ -3055,7 +3059,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand); } -fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -3074,7 +3078,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3125,7 +3129,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3266,7 +3270,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = 
trace(@src()); defer tracy.end(); @@ -3274,7 +3278,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air. return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3290,13 +3294,13 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const dest_type = try sema.resolveType(block, src, zir_dest_type); const operand = sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3312,7 +3316,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(.ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3330,7 +3334,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3343,7 +3347,7 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3358,7 +3362,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3371,7 +3375,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3414,7 +3418,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer 
tracy.end(); @@ -3428,7 +3432,7 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.bitcast(block, dest_type, operand, operand_src); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3471,7 +3475,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3486,7 +3490,7 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3504,7 +3508,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3514,7 +3518,7 @@ fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3527,7 +3531,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3540,7 +3544,7 @@ fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3554,7 +3558,7 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3576,7 +3580,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!Air.Inst.Ref { +) 
CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3595,7 +3599,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3614,7 +3618,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3647,7 +3651,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3684,7 +3688,7 @@ fn analyzeSwitch( multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const mod = sema.mod; @@ -4350,20 +4354,23 @@ fn resolveSwitchItemVal( switch_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, -) InnerError!TypedValue { +) CompileError!TypedValue { const item = sema.resolveInst(item_ref); - // We have to avoid the other helper functions here because we cannot construct a LazySrcLoc - // because we only have the switch AST node. Only if we know for sure we need to report - // a compile error do we resolve the full source locations. - if (item.value()) |val| { - if (val.isUndef()) { - const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); - return sema.failWithUseOfUndef(block, src); - } + // Constructing a LazySrcLoc is costly because we only have the switch AST node. + // Only if we know for sure we need to report a compile error do we resolve the + // full source locations. 
+ if (sema.resolveConstValue(block, .unneeded, item)) |val| { return TypedValue{ .ty = item.ty, .val = val }; + } else |err| switch (err) { + error.NeededSourceLocation => { + const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); + return TypedValue{ + .ty = item.ty, + .val = try sema.resolveConstValue(block, src, item), + }; + }, + else => |e| return e, } - const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); - return sema.failWithNeededComptime(block, src); } fn validateSwitchRange( @@ -4374,7 +4381,7 @@ fn validateSwitchRange( last_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; const maybe_prev_src = try range_set.add(first_val, last_val, switch_prong_src); @@ -4388,7 +4395,7 @@ fn validateSwitchItem( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; const maybe_prev_src = try range_set.add(item_val, item_val, switch_prong_src); return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); @@ -4401,7 +4408,7 @@ fn validateSwitchItemEnum( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const mod = sema.mod; const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val) orelse { @@ -4435,7 +4442,7 @@ fn validateSwitchDupe( maybe_prev_src: ?Module.SwitchProngSrc, switch_prong_src: Module.SwitchProngSrc, src_node_offset: i32, -) InnerError!void { +) CompileError!void { const prev_prong_src = maybe_prev_src orelse return; const mod = sema.mod; const gpa = sema.gpa; @@ -4469,7 +4476,7 @@ fn validateSwitchItemBool( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; if (item_val.toBool()) { true_count.* += 1; @@ -4491,7 +4498,7 @@ fn validateSwitchItemSparse( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return; return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset); @@ -4503,7 +4510,7 @@ fn validateSwitchNoRange( ranges_len: u32, operand_ty: Type, src_node_offset: i32, -) InnerError!void { +) CompileError!void { if (ranges_len == 0) return; @@ -4530,7 +4537,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra 
= sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4539,7 +4546,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -4562,7 +4569,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return Air.Inst.Ref.bool_false; } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4587,13 +4594,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.addType(file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4602,7 +4609,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4615,7 +4622,7 @@ fn zirBitwise( block: *Scope.Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4675,7 +4682,7 @@ fn zirBitwise( return block.addBinOp(air_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4683,7 +4690,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4691,7 +4698,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4704,7 +4711,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4718,7 +4725,7 @@ fn zirNegate( return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4738,7 +4745,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4757,7 +4764,7 @@ fn analyzeArithmetic( src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -4867,7 +4874,7 @@ fn analyzeArithmetic( return block.addBinOp(air_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4882,7 +4889,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4966,7 +4973,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5091,7 +5098,7 @@ fn zirCmp( return block.addBinOp(tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); @@ -5100,7 +5107,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.addIntUnsigned(Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); @@ -5113,7 +5120,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5122,7 +5129,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{}); } @@ -5131,12 +5138,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5179,7 +5186,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; @@ -5187,7 +5194,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.addType(operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_ptr = sema.resolveInst(inst_data.operand); @@ -5195,13 +5202,13 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.addType(elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5211,7 +5218,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5230,7 +5237,7 @@ fn zirTypeofPeer( return sema.addType(result_type); } -fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5256,7 +5263,7 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5295,7 +5302,7 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5369,7 +5376,7 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5383,7 +5390,7 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5394,7 +5401,7 @@ fn zirIsNonNullPtr( return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5403,7 +5410,7 @@ fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5418,7 +5425,7 @@ fn zirCondbr( sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index, -) 
InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5461,7 +5468,7 @@ fn zirCondbr( return always_noreturn; } -fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5482,7 +5489,7 @@ fn zirRetErrValue( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const err_name = inst_data.get(sema.code); const src = inst_data.src(); @@ -5507,7 +5514,7 @@ fn zirRetCoerce( block: *Scope.Block, inst: Zir.Inst.Index, need_coercion: bool, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5518,7 +5525,7 @@ fn zirRetCoerce( return sema.analyzeRet(block, operand, src, need_coercion); } -fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5535,7 +5542,7 @@ fn analyzeRet( operand: Air.Inst.Ref, src: LazySrcLoc, need_coercion: bool, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { if (block.inlining) |inlining| { // We are inlining a function call; rewrite the `ret` as a `break`. try inlining.merges.results.append(sema.gpa, operand); @@ -5564,7 +5571,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5585,7 +5592,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.addType(ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5639,7 +5646,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.addType(ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5653,13 +5660,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5772,7 +5779,7 @@ fn 
zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5780,7 +5787,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5788,7 +5795,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5796,13 +5803,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5824,7 +5831,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5833,7 +5840,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5842,84 +5849,84 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return 
sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5982,199 +5989,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addTyOp(.bitcast, type_res, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, 
src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); } -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6185,7 +6192,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6197,7 +6204,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6263,7 +6270,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) 
InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -6330,7 +6337,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6340,7 +6347,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6350,7 +6357,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6360,7 +6367,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{}); @@ -6370,7 +6377,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6380,7 +6387,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6556,7 +6563,7 @@ fn namedFieldPtr( object_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; @@ -6706,7 +6713,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, decl_name: []const u8, -) InnerError!?Air.Inst.Ref { +) CompileError!?Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6734,7 +6741,7 @@ fn analyzeStructFieldPtr( field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6769,7 +6776,7 @@ fn analyzeUnionFieldPtr( field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6805,7 +6812,7 @@ fn elemPtr( 
array_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6832,7 +6839,7 @@ fn elemPtrArray( array_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (array_ptr.value()) |array_ptr_val| { if (elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. @@ -6859,7 +6866,7 @@ fn coerce( dest_type: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (dest_type.tag() == .var_args_param) { return sema.coerceVarArgParam(block, inst, inst_src); } @@ -7041,7 +7048,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!?Air.Inst.Index { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7153,7 +7160,7 @@ fn bitcast( dest_type: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { // Keep the comptime Value representation; take the new type. return sema.addConstant(dest_type, val); @@ -7163,7 +7170,7 @@ fn bitcast( return block.addTyOp(.bitcast, dest_type, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!Air.Inst.Ref { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7179,12 +7186,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) CompileError!Air.Inst.Ref { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) CompileError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7205,7 +7212,7 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl ); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Ref { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) CompileError!Air.Inst.Ref { const variable = tv.val.castTag(.variable).?.data; const ty = try Module.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); @@ -7233,7 +7240,7 @@ fn analyzeRef( block: *Scope.Block, src: LazySrcLoc, operand: Air.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7253,7 +7260,7 @@ fn analyzeLoad( src: LazySrcLoc, ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ptr_ty = sema.getTypeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag()) { .Pointer => ptr_ty.elemType(), @@ -7276,7 +7283,7 @@ fn analyzeIsNull( src: LazySrcLoc, operand: Air.Inst.Ref, invert_logic: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7300,7 +7307,7 @@ fn analyzeIsNonErr( block: *Scope.Block, src: LazySrcLoc, operand: Air.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; @@ -7329,7 +7336,7 @@ fn analyzeSlice( end_opt: ?Air.Inst.Index, sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7405,7 +7412,7 @@ fn cmpNumeric( op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const lhs_ty = sema.getTypeOf(lhs); const rhs_ty = sema.getTypeOf(rhs); @@ -7746,7 +7753,7 @@ fn resolvePeerTypes( return chosen.ty; } -fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) InnerError!Type { +fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) CompileError!Type { switch 
(ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -7798,7 +7805,7 @@ fn resolveBuiltinTypeFields( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Type { +) CompileError!Type { const resolved_ty = try sema.getBuiltinType(block, src, name); return sema.resolveTypeFields(block, src, resolved_ty); } @@ -7808,7 +7815,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; @@ -7834,7 +7841,7 @@ fn getBuiltinType( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Type { +) CompileError!Type { const ty_inst = try sema.getBuiltin(block, src, name); return sema.resolveAirAsType(block, src, ty_inst); } @@ -7848,7 +7855,7 @@ fn typeHasOnePossibleValue( block: *Scope.Block, src: LazySrcLoc, starting_type: Type, -) InnerError!?Value { +) CompileError!?Value { var ty = starting_type; while (true) switch (ty.tag()) { .f16, @@ -7986,7 +7993,7 @@ fn typeHasOnePossibleValue( }; } -fn getAstTree(sema: *Sema, block: *Scope.Block) InnerError!*const std.zig.ast.Tree { +fn getAstTree(sema: *Sema, block: *Scope.Block) CompileError!*const std.zig.ast.Tree { return block.src_decl.namespace.file_scope.getTree(sema.gpa) catch |err| { log.err("unable to load AST to report compile error: {s}", .{@errorName(err)}); return error.AnalysisFail; @@ -8166,15 +8173,15 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } -fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) InnerError!Air.Inst.Ref { +fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); } -fn addConstUndef(sema: *Sema, ty: Type) InnerError!Air.Inst.Ref { +fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { return sema.addConstant(ty, Value.initTag(.undef)); } -fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { +fn addConstant(sema: *Sema, ty: Type, val: Value) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const ty_inst = try sema.addType(ty); try sema.air_values.append(gpa, val); -- cgit v1.2.3 From eadbee2041bba1cd03b24d8f30161025af8e3590 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Jul 2021 15:52:06 -0700 Subject: stage2: first pass at printing AIR/Liveness to text * some instructions are not implemented yet * fix off-by-1 in Air.getMainBody * Compilation: use `@import("builtin")` rather than `std.builtin` for the values that are different for different build configurations. * Sema: avoid calling `addType` in between air_instructions.ensureUnusedCapacity and corresponding appendAssumeCapacity because it can possibly add an instruction. 
* Value: functions print their names --- BRANCH_TODO | 566 ---------------------------------------------------- src/Air.zig | 4 +- src/Compilation.zig | 9 +- src/Module.zig | 3 +- src/Sema.zig | 3 +- src/print_air.zig | 294 +++++++++++++++++++++++++++ src/value.zig | 2 +- 7 files changed, 307 insertions(+), 574 deletions(-) delete mode 100644 BRANCH_TODO create mode 100644 src/print_air.zig (limited to 'src/Compilation.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO deleted file mode 100644 index 9055cda307..0000000000 --- a/BRANCH_TODO +++ /dev/null @@ -1,566 +0,0 @@ - * be sure to test debug info of parameters - - - pub fn specialOperandDeaths(self: Inst) bool { - return (self.deaths & (1 << deaths_bits)) != 0; - } - - /// Returns `null` if runtime-known. - /// Should be called by codegen, not by Sema. Sema functions should call - /// `resolvePossiblyUndefinedValue` or `resolveDefinedValue` instead. - /// TODO audit Sema code for violations to the above guidance. - pub fn value(base: *Inst) ?Value { - if (base.ty.onePossibleValue()) |opv| return opv; - - const inst = base.castTag(.constant) orelse return null; - return inst.val; - } - - - -/// For debugging purposes, prints a function representation to stderr. -pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void { - const allocator = old_module.gpa; - var ctx: DumpAir = .{ - .allocator = allocator, - .arena = std.heap.ArenaAllocator.init(allocator), - .old_module = &old_module, - .module_fn = module_fn, - .indent = 2, - .inst_table = DumpAir.InstTable.init(allocator), - .partial_inst_table = DumpAir.InstTable.init(allocator), - .const_table = DumpAir.InstTable.init(allocator), - }; - defer ctx.inst_table.deinit(); - defer ctx.partial_inst_table.deinit(); - defer ctx.const_table.deinit(); - defer ctx.arena.deinit(); - - switch (module_fn.state) { - .queued => std.debug.print("(queued)", .{}), - .inline_only => std.debug.print("(inline_only)", .{}), - .in_progress => std.debug.print("(in_progress)", .{}), - .sema_failure => std.debug.print("(sema_failure)", .{}), - .dependency_failure => std.debug.print("(dependency_failure)", .{}), - .success => { - const writer = std.io.getStdErr().writer(); - ctx.dump(module_fn.body, writer) catch @panic("failed to dump AIR"); - }, - } -} - -const DumpAir = struct { - allocator: *std.mem.Allocator, - arena: std.heap.ArenaAllocator, - old_module: *const Module, - module_fn: *Module.Fn, - indent: usize, - inst_table: InstTable, - partial_inst_table: InstTable, - const_table: InstTable, - next_index: usize = 0, - next_partial_index: usize = 0, - next_const_index: usize = 0, - - const InstTable = std.AutoArrayHashMap(*Inst, usize); - - /// TODO: Improve this code to include a stack of Body and store the instructions - /// in there. Now we are putting all the instructions in a function local table, - /// however instructions that are in a Body can be thown away when the Body ends. - fn dump(dtz: *DumpAir, body: Body, writer: std.fs.File.Writer) !void { - // First pass to pre-populate the table so that we can show even invalid references. - // Must iterate the same order we iterate the second time. - // We also look for constants and put them in the const_table. 
- try dtz.fetchInstsAndResolveConsts(body); - - std.debug.print("Module.Function(name={s}):\n", .{dtz.module_fn.owner_decl.name}); - - var it = dtz.const_table.iterator(); - while (it.next()) |entry| { - const constant = entry.key_ptr.*.castTag(.constant).?; - try writer.print(" @{d}: {} = {};\n", .{ - entry.value_ptr.*, constant.base.ty, constant.val, - }); - } - - return dtz.dumpBody(body, writer); - } - - fn fetchInstsAndResolveConsts(dtz: *DumpAir, body: Body) error{OutOfMemory}!void { - for (body.instructions) |inst| { - try dtz.inst_table.put(inst, dtz.next_index); - dtz.next_index += 1; - switch (inst.tag) { - .alloc, - .retvoid, - .unreach, - .breakpoint, - .dbg_stmt, - .arg, - => {}, - - .ref, - .ret, - .bitcast, - .not, - .is_non_null, - .is_non_null_ptr, - .is_null, - .is_null_ptr, - .is_err, - .is_non_err, - .is_err_ptr, - .is_non_err_ptr, - .ptrtoint, - .floatcast, - .intcast, - .load, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - .wrap_errunion_payload, - .wrap_errunion_err, - .unwrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, - => { - const un_op = inst.cast(Inst.UnOp).?; - try dtz.findConst(un_op.operand); - }, - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .store, - .bool_and, - .bool_or, - .bit_and, - .bit_or, - .xor, - => { - const bin_op = inst.cast(Inst.BinOp).?; - try dtz.findConst(bin_op.lhs); - try dtz.findConst(bin_op.rhs); - }, - - .br => { - const br = inst.castTag(.br).?; - try dtz.findConst(&br.block.base); - try dtz.findConst(br.operand); - }, - - .br_block_flat => { - const br_block_flat = inst.castTag(.br_block_flat).?; - try dtz.findConst(&br_block_flat.block.base); - try dtz.fetchInstsAndResolveConsts(br_block_flat.body); - }, - - .br_void => { - const br_void = inst.castTag(.br_void).?; - try dtz.findConst(&br_void.block.base); - }, - - .block => { - const block = inst.castTag(.block).?; - try dtz.fetchInstsAndResolveConsts(block.body); - }, - - .condbr => { - const condbr = inst.castTag(.condbr).?; - try dtz.findConst(condbr.condition); - try dtz.fetchInstsAndResolveConsts(condbr.then_body); - try dtz.fetchInstsAndResolveConsts(condbr.else_body); - }, - .switchbr => { - const switchbr = inst.castTag(.switchbr).?; - try dtz.findConst(switchbr.target); - try dtz.fetchInstsAndResolveConsts(switchbr.else_body); - for (switchbr.cases) |case| { - try dtz.fetchInstsAndResolveConsts(case.body); - } - }, - - .loop => { - const loop = inst.castTag(.loop).?; - try dtz.fetchInstsAndResolveConsts(loop.body); - }, - .call => { - const call = inst.castTag(.call).?; - try dtz.findConst(call.func); - for (call.args) |arg| { - try dtz.findConst(arg); - } - }, - .struct_field_ptr => { - const struct_field_ptr = inst.castTag(.struct_field_ptr).?; - try dtz.findConst(struct_field_ptr.struct_ptr); - }, - - // TODO fill out this debug printing - .assembly, - .constant, - .varptr, - => {}, - } - } - } - - fn dumpBody(dtz: *DumpAir, body: Body, writer: std.fs.File.Writer) (std.fs.File.WriteError || error{OutOfMemory})!void { - for (body.instructions) |inst| { - const my_index = dtz.next_partial_index; - try dtz.partial_inst_table.put(inst, my_index); - dtz.next_partial_index += 1; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.print("%{d}: {} = {s}(", .{ - my_index, inst.ty, @tagName(inst.tag), - }); - switch (inst.tag) { - .alloc, - .retvoid, - .unreach, - .breakpoint, - .dbg_stmt, - => try 
writer.writeAll(")\n"), - - .ref, - .ret, - .bitcast, - .not, - .is_non_null, - .is_non_null_ptr, - .is_null, - .is_null_ptr, - .is_err, - .is_err_ptr, - .is_non_err, - .is_non_err_ptr, - .ptrtoint, - .floatcast, - .intcast, - .load, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - .wrap_errunion_err, - .wrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload, - .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, - => { - const un_op = inst.cast(Inst.UnOp).?; - const kinky = try dtz.writeInst(writer, un_op.operand); - if (kinky != null) { - try writer.writeAll(") // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .store, - .bool_and, - .bool_or, - .bit_and, - .bit_or, - .xor, - => { - const bin_op = inst.cast(Inst.BinOp).?; - - const lhs_kinky = try dtz.writeInst(writer, bin_op.lhs); - try writer.writeAll(", "); - const rhs_kinky = try dtz.writeInst(writer, bin_op.rhs); - - if (lhs_kinky != null or rhs_kinky != null) { - try writer.writeAll(") // Instruction does not dominate all uses!"); - if (lhs_kinky) |lhs| { - try writer.print(" %{d}", .{lhs}); - } - if (rhs_kinky) |rhs| { - try writer.print(" %{d}", .{rhs}); - } - try writer.writeAll("\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .arg => { - const arg = inst.castTag(.arg).?; - try writer.print("{s})\n", .{arg.name}); - }, - - .br => { - const br = inst.castTag(.br).?; - - const lhs_kinky = try dtz.writeInst(writer, &br.block.base); - try writer.writeAll(", "); - const rhs_kinky = try dtz.writeInst(writer, br.operand); - - if (lhs_kinky != null or rhs_kinky != null) { - try writer.writeAll(") // Instruction does not dominate all uses!"); - if (lhs_kinky) |lhs| { - try writer.print(" %{d}", .{lhs}); - } - if (rhs_kinky) |rhs| { - try writer.print(" %{d}", .{rhs}); - } - try writer.writeAll("\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .br_block_flat => { - const br_block_flat = inst.castTag(.br_block_flat).?; - const block_kinky = try dtz.writeInst(writer, &br_block_flat.block.base); - if (block_kinky != null) { - try writer.writeAll(", { // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(", {\n"); - } - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(br_block_flat.body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll("})\n"); - }, - - .br_void => { - const br_void = inst.castTag(.br_void).?; - const kinky = try dtz.writeInst(writer, &br_void.block.base); - if (kinky) |_| { - try writer.writeAll(") // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .block => { - const block = inst.castTag(.block).?; - - try writer.writeAll("{\n"); - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(block.body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll("})\n"); - }, - - .condbr => { - const condbr = inst.castTag(.condbr).?; - - const condition_kinky = try dtz.writeInst(writer, condbr.condition); - if (condition_kinky != null) { - try writer.writeAll(", { // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(", {\n"); - } - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(condbr.then_body, writer); - - try 
writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("}, {\n"); - - try dtz.dumpBody(condbr.else_body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("})\n"); - }, - - .switchbr => { - const switchbr = inst.castTag(.switchbr).?; - - const condition_kinky = try dtz.writeInst(writer, switchbr.target); - if (condition_kinky != null) { - try writer.writeAll(", { // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(", {\n"); - } - const old_indent = dtz.indent; - - if (switchbr.else_body.instructions.len != 0) { - dtz.indent += 2; - try dtz.dumpBody(switchbr.else_body, writer); - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("}, {\n"); - dtz.indent = old_indent; - } - for (switchbr.cases) |case| { - dtz.indent += 2; - try dtz.dumpBody(case.body, writer); - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("}, {\n"); - dtz.indent = old_indent; - } - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("})\n"); - }, - - .loop => { - const loop = inst.castTag(.loop).?; - - try writer.writeAll("{\n"); - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(loop.body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll("})\n"); - }, - - .call => { - const call = inst.castTag(.call).?; - - const args_kinky = try dtz.allocator.alloc(?usize, call.args.len); - defer dtz.allocator.free(args_kinky); - std.mem.set(?usize, args_kinky, null); - var any_kinky_args = false; - - const func_kinky = try dtz.writeInst(writer, call.func); - - for (call.args) |arg, i| { - try writer.writeAll(", "); - - args_kinky[i] = try dtz.writeInst(writer, arg); - any_kinky_args = any_kinky_args or args_kinky[i] != null; - } - - if (func_kinky != null or any_kinky_args) { - try writer.writeAll(") // Instruction does not dominate all uses!"); - if (func_kinky) |func_index| { - try writer.print(" %{d}", .{func_index}); - } - for (args_kinky) |arg_kinky| { - if (arg_kinky) |arg_index| { - try writer.print(" %{d}", .{arg_index}); - } - } - try writer.writeAll("\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .struct_field_ptr => { - const struct_field_ptr = inst.castTag(.struct_field_ptr).?; - const kinky = try dtz.writeInst(writer, struct_field_ptr.struct_ptr); - if (kinky != null) { - try writer.print("{d}) // Instruction does not dominate all uses!\n", .{ - struct_field_ptr.field_index, - }); - } else { - try writer.print("{d})\n", .{struct_field_ptr.field_index}); - } - }, - - // TODO fill out this debug printing - .assembly, - .constant, - .varptr, - => { - try writer.writeAll("!TODO!)\n"); - }, - } - } - } - - fn writeInst(dtz: *DumpAir, writer: std.fs.File.Writer, inst: *Inst) !?usize { - if (dtz.partial_inst_table.get(inst)) |operand_index| { - try writer.print("%{d}", .{operand_index}); - return null; - } else if (dtz.const_table.get(inst)) |operand_index| { - try writer.print("@{d}", .{operand_index}); - return null; - } else if (dtz.inst_table.get(inst)) |operand_index| { - try writer.print("%{d}", .{operand_index}); - return operand_index; - } else { - try writer.writeAll("!BADREF!"); - return null; - } - } - - fn findConst(dtz: *DumpAir, operand: *Inst) !void { - if (operand.tag == .constant) { - try dtz.const_table.put(operand, dtz.next_const_index); - dtz.next_const_index += 1; - } - } -}; - -pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { - const zir_module = 
scope.namespace(); - const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); - const loc = std.zig.findLineColumn(source, inst.src); - if (inst.tag == .constant) { - std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{ - inst.ty, - inst.castTag(.constant).?.val, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else if (inst.deaths == 0) { - std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else { - std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - inst.deaths, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } -} - - /// For debugging purposes. - pub fn dump(func: *Fn, mod: Module) void { - ir.dumpFn(mod, func); - } - diff --git a/src/Air.zig b/src/Air.zig index 60e6e9933d..a8b38b7659 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -374,8 +374,8 @@ pub const Asm = struct { pub fn getMainBody(air: Air) []const Air.Inst.Index { const body_index = air.extra[@enumToInt(ExtraIndex.main_block)]; - const body_len = air.extra[body_index]; - return air.extra[body_index..][0..body_len]; + const extra = air.extraData(Block, body_index); + return air.extra[extra.end..][0..extra.data.body_len]; } pub fn getType(air: Air, inst: Air.Inst.Index) Type { diff --git a/src/Compilation.zig b/src/Compilation.zig index f241ae6b10..50d1f5760e 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1,6 +1,7 @@ const Compilation = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -907,7 +908,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation { // comptime conditions ((build_options.have_llvm and comptime std.Target.current.isDarwin()) and // runtime conditions - (use_lld and std.builtin.os.tag == .macos and options.target.isDarwin())); + (use_lld and builtin.os.tag == .macos and options.target.isDarwin())); const sysroot = blk: { if (options.sysroot) |sysroot| { @@ -2026,8 +2027,10 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); defer liveness.deinit(gpa); - if (std.builtin.mode == .Debug and self.verbose_air) { - @panic("TODO implement dumping AIR and liveness"); + if (builtin.mode == .Debug and self.verbose_air) { + std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); + @import("print_air.zig").dump(gpa, air, liveness); + std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); } assert(decl.ty.hasCodeGenBits()); diff --git a/src/Module.zig b/src/Module.zig index fb514ccbd2..f452824d33 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -3551,7 +3551,8 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { try sema.analyzeFnBody(&inner_block, func.zir_body_inst); // Copy the block into place and mark that as the main block. 
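// [Editor's note] A minimal standalone sketch (not part of the patch) of the
// "extra array" encoding that getMainBody and the Air.Block bookkeeping above
// rely on: fixed-size structs are flattened into a []u32, and trailing items
// (here, a block body) are appended directly after the struct's fields.
// `ExampleBlock` and this `extraData` are illustrative stand-ins assuming the
// same layout convention; they are not the compiler's actual definitions.
const std = @import("std");

const ExampleBlock = struct {
    body_len: u32,
};

fn ExtraData(comptime T: type) type {
    return struct { data: T, end: usize };
}

/// Decode a `T` whose fields were appended to `extra` starting at `index`,
/// returning the decoded struct and the index just past its last field.
fn extraData(extra: []const u32, comptime T: type, index: usize) ExtraData(T) {
    var result: T = undefined;
    var i = index;
    inline for (std.meta.fields(T)) |field| {
        @field(result, field.name) = extra[i];
        i += 1;
    }
    return .{ .data = result, .end = i };
}

test "block header followed by its body in one u32 array" {
    // Layout: [body_len][inst 10][inst 11][inst 12]
    const extra = [_]u32{ 3, 10, 11, 12 };
    const block = extraData(&extra, ExampleBlock, 0);
    const body = extra[block.end..][0..block.data.body_len];
    std.debug.assert(std.mem.eql(u32, body, &[_]u32{ 10, 11, 12 }));
}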
- try sema.air_extra.ensureUnusedCapacity(gpa, inner_block.instructions.items.len + 1); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + inner_block.instructions.items.len); const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(u32, inner_block.instructions.items.len), }); diff --git a/src/Sema.zig b/src/Sema.zig index ac6755d24e..a144ce1d50 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2028,6 +2028,7 @@ fn analyzeBlockBody( refToIndex(coerced_operand).?); // Convert the br operand to a block. + const br_operand_ty_ref = try sema.addType(br_operand_ty); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + coerce_block.instructions.items.len); try sema.air_instructions.ensureUnusedCapacity(gpa, 2); @@ -2037,7 +2038,7 @@ fn analyzeBlockBody( sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ - .ty = try sema.addType(br_operand_ty), + .ty = br_operand_ty_ref, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(u32, coerce_block.instructions.items.len), }), diff --git a/src/print_air.zig b/src/print_air.zig new file mode 100644 index 0000000000..44c170a078 --- /dev/null +++ b/src/print_air.zig @@ -0,0 +1,294 @@ +const std = @import("std"); +const Allocator = std.mem.Allocator; +const fmtIntSizeBin = std.fmt.fmtIntSizeBin; + +const Module = @import("Module.zig"); +const Value = @import("value.zig").Value; +const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); + +pub fn dump(gpa: *Allocator, air: Air, liveness: Liveness) void { + const instruction_bytes = air.instructions.len * + // Here we don't use @sizeOf(Air.Inst.Data) because it would include + // the debug safety tag but we want to measure release size. 
+ (@sizeOf(Air.Inst.Tag) + 8); + const extra_bytes = air.extra.len * @sizeOf(u32); + const values_bytes = air.values.len * @sizeOf(Value); + const variables_bytes = air.variables.len * @sizeOf(*Module.Var); + const tomb_bytes = liveness.tomb_bits.len * @sizeOf(usize); + const liveness_extra_bytes = liveness.extra.len * @sizeOf(u32); + const liveness_special_bytes = liveness.special.count() * 8; + const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes + + values_bytes * variables_bytes + @sizeOf(Liveness) + liveness_extra_bytes + + liveness_special_bytes + tomb_bytes; + + // zig fmt: off + std.debug.print( + \\# Total AIR+Liveness bytes: {} + \\# AIR Instructions: {d} ({}) + \\# AIR Extra Data: {d} ({}) + \\# AIR Values Bytes: {d} ({}) + \\# AIR Variables Bytes: {d} ({}) + \\# Liveness tomb_bits: {} + \\# Liveness Extra Data: {d} ({}) + \\# Liveness special table: {d} ({}) + \\ + , .{ + fmtIntSizeBin(total_bytes), + air.instructions.len, fmtIntSizeBin(instruction_bytes), + air.extra.len, fmtIntSizeBin(extra_bytes), + air.values.len, fmtIntSizeBin(values_bytes), + air.variables.len, fmtIntSizeBin(variables_bytes), + fmtIntSizeBin(tomb_bytes), + liveness.extra.len, fmtIntSizeBin(liveness_extra_bytes), + liveness.special.count(), fmtIntSizeBin(liveness_special_bytes), + }); + // zig fmt: on + var arena = std.heap.ArenaAllocator.init(gpa); + defer arena.deinit(); + + var writer: Writer = .{ + .gpa = gpa, + .arena = &arena.allocator, + .air = air, + .liveness = liveness, + .indent = 0, + }; + const stream = std.io.getStdErr().writer(); + writer.writeAllConstants(stream) catch return; + writer.writeBody(stream, air.getMainBody()) catch return; +} + +const Writer = struct { + gpa: *Allocator, + arena: *Allocator, + air: Air, + liveness: Liveness, + indent: usize, + + fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void { + for (w.air.instructions.items(.tag)) |tag, i| { + const inst = @intCast(u32, i); + switch (tag) { + .constant, .const_ty => { + try s.writeByteNTimes(' ', w.indent); + try s.print("%{d} ", .{inst}); + try w.writeInst(s, inst); + try s.writeAll(")\n"); + }, + else => continue, + } + } + } + + fn writeBody(w: *Writer, s: anytype, body: []const Air.Inst.Index) @TypeOf(s).Error!void { + for (body) |inst| { + try s.writeByteNTimes(' ', w.indent); + try s.print("%{d} ", .{inst}); + try w.writeInst(s, inst); + if (w.liveness.isUnused(inst)) { + try s.writeAll(") unused\n"); + } else { + try s.writeAll("\n"); + } + } + } + + fn writeInst(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const tags = w.air.instructions.items(.tag); + const tag = tags[inst]; + try s.print("= {s}(", .{@tagName(tags[inst])}); + switch (tag) { + .arg => try w.writeTyStr(s, inst), + + .add, + .addwrap, + .sub, + .subwrap, + .mul, + .mulwrap, + .div, + .bit_and, + .bit_or, + .xor, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .bool_and, + .bool_or, + .store, + => try w.writeBinOp(s, inst), + + .is_null, + .is_non_null, + .is_null_ptr, + .is_non_null_ptr, + .is_err, + .is_non_err, + .is_err_ptr, + .is_non_err_ptr, + .ptrtoint, + .ret, + => try w.writeUnOp(s, inst), + + .breakpoint, + .unreach, + => try w.writeNoOp(s, inst), + + .const_ty, + .alloc, + => try w.writeTy(s, inst), + + .not, + .bitcast, + .load, + .ref, + .floatcast, + .intcast, + .optional_payload, + .optional_payload_ptr, + .wrap_optional, + .unwrap_errunion_payload, + .unwrap_errunion_err, + .unwrap_errunion_payload_ptr, + .unwrap_errunion_err_ptr, + .wrap_errunion_payload, 
+ .wrap_errunion_err, + => try w.writeTyOp(s, inst), + + .block, + .loop, + => try w.writeBlock(s, inst), + + .struct_field_ptr => try w.writeStructFieldPtr(s, inst), + .varptr => try w.writeVarPtr(s, inst), + .constant => try w.writeConstant(s, inst), + .assembly => try w.writeAssembly(s, inst), + .dbg_stmt => try w.writeDbgStmt(s, inst), + .call => try w.writeCall(s, inst), + .br => try w.writeBr(s, inst), + .cond_br => try w.writeCondBr(s, inst), + .switch_br => try w.writeSwitchBr(s, inst), + } + } + + fn writeTyStr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeBinOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeUnOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeNoOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const ty = w.air.instructions.items(.data)[inst].ty; + try s.print("{}", .{ty}); + } + + fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeBlock(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeStructFieldPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeVarPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; + const val = w.air.values[ty_pl.payload]; + try s.print("{}, {}", .{ ty_pl.ty, val }); + } + + fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeDbgStmt(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const dbg_stmt = w.air.instructions.items(.data)[inst].dbg_stmt; + try s.print("{d}:{d}", .{ dbg_stmt.line + 1, dbg_stmt.column + 1 }); + } + + fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const pl_op = w.air.instructions.items(.data)[inst].pl_op; + const extra = w.air.extraData(Air.Call, pl_op.payload); + const args = w.air.extra[extra.end..][0..extra.data.args_len]; + try w.writeInstRef(s, pl_op.operand); + try s.writeAll(", ["); + for (args) |arg, i| { + if (i != 0) try s.writeAll(", "); + try w.writeInstRef(s, @intToEnum(Air.Inst.Ref, arg)); + } + try s.writeAll("]"); + } + + fn writeBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeCondBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeInstRef(w: *Writer, s: anytype, inst: Air.Inst.Ref) @TypeOf(s).Error!void { + var i: usize = @enumToInt(inst); + + if (i < Air.Inst.Ref.typed_value_map.len) { + return s.print("@{}", .{inst}); + } + i -= 
Air.Inst.Ref.typed_value_map.len; + + return w.writeInstIndex(s, @intCast(Air.Inst.Index, i)); + } + + fn writeInstIndex(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + return s.print("%{d}", .{inst}); + } +}; diff --git a/src/value.zig b/src/value.zig index df3a97b09a..abb2ea7b1e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -573,7 +573,7 @@ pub const Value = extern union { .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream), .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .function => return out_stream.writeAll("(function)"), + .function => return out_stream.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}), .extern_fn => return out_stream.writeAll("(extern function)"), .variable => return out_stream.writeAll("(variable)"), .ref_val => { -- cgit v1.2.3 From fe14e339458a578657f3890f00d654a15c84422c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 15:22:37 -0700 Subject: stage2: separate work queue item for functions than decls Previously we had codegen_decl for both constant values as well as function bodies. A recent commit updated the linker backends to add updateFunc as a separate function than updateDecl, and now this commit does the same with work queue tasks. The frontend now distinguishes between function pointers and function bodies. --- src/Compilation.zig | 158 ++++++++++++++++++++++++++++++++++------------- src/Module.zig | 174 +++++++++++++++++++++++++++------------------------- 2 files changed, 203 insertions(+), 129 deletions(-) (limited to 'src/Compilation.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 50d1f5760e..ea484c2d15 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -169,8 +169,10 @@ pub const CSourceFile = struct { }; const Job = union(enum) { - /// Write the machine code for a Decl to the output file. + /// Write the constant value for a Decl to the output file. codegen_decl: *Module.Decl, + /// Write the machine code for a function to the output file. + codegen_func: *Module.Fn, /// Render the .h file snippet for the Decl. emit_h_decl: *Module.Decl, /// The Decl needs to be analyzed and possibly export itself. 
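// [Editor's note] An illustrative, self-contained sketch (not part of the
// patch) of the work-queue split this commit introduces: one tagged-union Job
// type with distinct `codegen_decl` and `codegen_func` variants, drained by a
// single switch, so only the function path has to carry body analysis.
// `Decl`, `Func`, and the print calls are hypothetical placeholders for the
// real Module types and linker backend calls.
const std = @import("std");

const Decl = struct { name: []const u8 };
const Func = struct { owner_decl: *const Decl };

const Job = union(enum) {
    /// Lower a constant value (e.g. a function pointer alias) for a Decl.
    codegen_decl: *const Decl,
    /// Analyze and lower the body of a function that the Decl owns.
    codegen_func: *const Func,
};

fn processJobs(jobs: []const Job) void {
    for (jobs) |job| {
        switch (job) {
            .codegen_decl => |decl| std.debug.print("emit constant for {s}\n", .{decl.name}),
            // Only this branch would run Sema on a body and compute liveness.
            .codegen_func => |func| std.debug.print("emit body for {s}\n", .{func.owner_decl.name}),
        }
    }
}

pub fn main() void {
    const decl = Decl{ .name = "main" };
    const func = Func{ .owner_decl = &decl };
    processJobs(&[_]Job{
        Job{ .codegen_func = &func },
        Job{ .codegen_decl = &decl },
    });
}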
@@ -2006,54 +2008,56 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor const module = self.bin_file.options.module.?; assert(decl.has_tv); if (decl.val.castTag(.function)) |payload| { - const func = payload.data; + if (decl.owns_tv) { + const func = payload.data; + + var air = switch (func.state) { + .sema_failure, .dependency_failure => continue, + .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { + error.AnalysisFail => { + assert(func.state != .in_progress); + continue; + }, + error.OutOfMemory => return error.OutOfMemory, + }, + .in_progress => unreachable, + .inline_only => unreachable, // don't queue work for this + .success => unreachable, // don't queue it twice + }; + defer air.deinit(gpa); + + log.debug("analyze liveness of {s}", .{decl.name}); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); + defer liveness.deinit(gpa); + + if (builtin.mode == .Debug and self.verbose_air) { + std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); + @import("print_air.zig").dump(gpa, air, liveness); + std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); + } - var air = switch (func.state) { - .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { + assert(decl.ty.hasCodeGenBits()); + + self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - assert(func.state != .in_progress); + decl.analysis = .codegen_failure; continue; }, - error.OutOfMemory => return error.OutOfMemory, - }, - .in_progress => unreachable, - .inline_only => unreachable, // don't queue work for this - .sema_failure, .dependency_failure => continue, - .success => unreachable, // don't queue it twice - }; - defer air.deinit(gpa); - - log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); - defer liveness.deinit(gpa); - - if (builtin.mode == .Debug and self.verbose_air) { - std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); - @import("print_air.zig").dump(gpa, air, liveness); - std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); + else => { + try module.failed_decls.ensureUnusedCapacity(gpa, 1); + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + continue; + }, + }; + continue; } - - assert(decl.ty.hasCodeGenBits()); - - self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - continue; - }, - else => { - try module.failed_decls.ensureUnusedCapacity(gpa, 1); - module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - gpa, - decl.srcLoc(), - "unable to codegen: {s}", - .{@errorName(err)}, - )); - decl.analysis = .codegen_failure_retryable; - continue; - }, - }; - continue; } assert(decl.ty.hasCodeGenBits()); @@ -2078,6 +2082,72 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor }; }, }, + .codegen_func => |func| switch (func.owner_decl.analysis) { + .unreferenced => unreachable, + .in_progress => unreachable, + .outdated => unreachable, + + .file_failure, + .sema_failure, + .codegen_failure, + .dependency_failure, + .sema_failure_retryable, + => continue, + + .complete, 
.codegen_failure_retryable => { + if (build_options.omit_stage2) + @panic("sadly stage2 is omitted from this build to save memory on the CI server"); + switch (func.state) { + .sema_failure, .dependency_failure => continue, + .queued => {}, + .in_progress => unreachable, + .inline_only => unreachable, // don't queue work for this + .success => unreachable, // don't queue it twice + } + + const module = self.bin_file.options.module.?; + const decl = func.owner_decl; + + var air = module.analyzeFnBody(decl, func) catch |err| switch (err) { + error.AnalysisFail => { + assert(func.state != .in_progress); + continue; + }, + error.OutOfMemory => return error.OutOfMemory, + }; + defer air.deinit(gpa); + + log.debug("analyze liveness of {s}", .{decl.name}); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); + defer liveness.deinit(gpa); + + if (builtin.mode == .Debug and self.verbose_air) { + std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); + @import("print_air.zig").dump(gpa, air, liveness); + std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); + } + + self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + continue; + }, + else => { + try module.failed_decls.ensureUnusedCapacity(gpa, 1); + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + continue; + }, + }; + continue; + }, + }, .emit_h_decl => |decl| switch (decl.analysis) { .unreferenced => unreachable, .in_progress => unreachable, diff --git a/src/Module.zig b/src/Module.zig index 9fadf67c6f..4930e7846c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2902,6 +2902,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { decl.generation = mod.generation; return false; } + log.debug("semaDecl {*} ({s})", .{ decl, decl.name }); var block_scope: Scope.Block = .{ .parent = null, @@ -2938,106 +2939,109 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); if (decl_tv.val.castTag(.function)) |fn_payload| { - var prev_type_has_bits = false; - var prev_is_inline = false; - var type_changed = true; - - if (decl.has_tv) { - prev_type_has_bits = decl.ty.hasCodeGenBits(); - type_changed = !decl.ty.eql(decl_tv.ty); - if (decl.getFunction()) |prev_func| { - prev_is_inline = prev_func.state == .inline_only; + const func = fn_payload.data; + const owns_tv = func.owner_decl == decl; + if (owns_tv) { + var prev_type_has_bits = false; + var prev_is_inline = false; + var type_changed = true; + + if (decl.has_tv) { + prev_type_has_bits = decl.ty.hasCodeGenBits(); + type_changed = !decl.ty.eql(decl_tv.ty); + if (decl.getFunction()) |prev_func| { + prev_is_inline = prev_func.state == .inline_only; + } + decl.clearValues(gpa); } - decl.clearValues(gpa); - } - - decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); - decl.val = try decl_tv.val.copy(&decl_arena.allocator); - decl.align_val = try align_val.copy(&decl_arena.allocator); - decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); - decl.has_tv = true; - decl.owns_tv = fn_payload.data.owner_decl == decl; - decl_arena_state.* = decl_arena.state; - decl.value_arena = decl_arena_state; - decl.analysis = .complete; - decl.generation = mod.generation; - const is_inline = 
decl_tv.ty.fnCallingConvention() == .Inline; - if (!is_inline and decl_tv.ty.hasCodeGenBits()) { - // We don't fully codegen the decl until later, but we do need to reserve a global - // offset table index for it. This allows us to codegen decls out of dependency order, - // increasing how many computations can be done in parallel. - try mod.comp.bin_file.allocateDeclIndexes(decl); - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - if (type_changed and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); + decl.val = try decl_tv.val.copy(&decl_arena.allocator); + decl.align_val = try align_val.copy(&decl_arena.allocator); + decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.has_tv = true; + decl.owns_tv = owns_tv; + decl_arena_state.* = decl_arena.state; + decl.value_arena = decl_arena_state; + decl.analysis = .complete; + decl.generation = mod.generation; + + const is_inline = decl_tv.ty.fnCallingConvention() == .Inline; + if (!is_inline and decl_tv.ty.hasCodeGenBits()) { + // We don't fully codegen the decl until later, but we do need to reserve a global + // offset table index for it. This allows us to codegen decls out of dependency order, + // increasing how many computations can be done in parallel. + try mod.comp.bin_file.allocateDeclIndexes(decl); + try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); + if (type_changed and mod.emit_h != null) { + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + } + } else if (!prev_is_inline and prev_type_has_bits) { + mod.comp.bin_file.freeDecl(decl); } - } else if (!prev_is_inline and prev_type_has_bits) { - mod.comp.bin_file.freeDecl(decl); - } - if (decl.is_exported) { - const export_src = src; // TODO make this point at `export` token - if (is_inline) { - return mod.fail(&block_scope.base, export_src, "export of inline function", .{}); + if (decl.is_exported) { + const export_src = src; // TODO make this point at `export` token + if (is_inline) { + return mod.fail(&block_scope.base, export_src, "export of inline function", .{}); + } + // The scope needs to have the decl in it. + try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); } - // The scope needs to have the decl in it. 
- try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); - } - return type_changed or is_inline != prev_is_inline; - } else { - var type_changed = true; - if (decl.has_tv) { - type_changed = !decl.ty.eql(decl_tv.ty); - decl.clearValues(gpa); + return type_changed or is_inline != prev_is_inline; } + } + var type_changed = true; + if (decl.has_tv) { + type_changed = !decl.ty.eql(decl_tv.ty); + decl.clearValues(gpa); + } - decl.owns_tv = false; - var queue_linker_work = false; - if (decl_tv.val.castTag(.variable)) |payload| { - const variable = payload.data; - if (variable.owner_decl == decl) { - decl.owns_tv = true; - queue_linker_work = true; + decl.owns_tv = false; + var queue_linker_work = false; + if (decl_tv.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.owner_decl == decl) { + decl.owns_tv = true; + queue_linker_work = true; - const copied_init = try variable.init.copy(&decl_arena.allocator); - variable.init = copied_init; - } - } else if (decl_tv.val.castTag(.extern_fn)) |payload| { - const owner_decl = payload.data; - if (decl == owner_decl) { - decl.owns_tv = true; - queue_linker_work = true; - } + const copied_init = try variable.init.copy(&decl_arena.allocator); + variable.init = copied_init; } + } else if (decl_tv.val.castTag(.extern_fn)) |payload| { + const owner_decl = payload.data; + if (decl == owner_decl) { + decl.owns_tv = true; + queue_linker_work = true; + } + } - decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); - decl.val = try decl_tv.val.copy(&decl_arena.allocator); - decl.align_val = try align_val.copy(&decl_arena.allocator); - decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); - decl.has_tv = true; - decl_arena_state.* = decl_arena.state; - decl.value_arena = decl_arena_state; - decl.analysis = .complete; - decl.generation = mod.generation; - - if (queue_linker_work and decl.ty.hasCodeGenBits()) { - try mod.comp.bin_file.allocateDeclIndexes(decl); - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); + decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); + decl.val = try decl_tv.val.copy(&decl_arena.allocator); + decl.align_val = try align_val.copy(&decl_arena.allocator); + decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.has_tv = true; + decl_arena_state.* = decl_arena.state; + decl.value_arena = decl_arena_state; + decl.analysis = .complete; + decl.generation = mod.generation; - if (type_changed and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); - } - } + if (queue_linker_work and decl.ty.hasCodeGenBits()) { + try mod.comp.bin_file.allocateDeclIndexes(decl); + try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - if (decl.is_exported) { - const export_src = src; // TODO point to the export token - // The scope needs to have the decl in it. - try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); + if (type_changed and mod.emit_h != null) { + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); } + } - return type_changed; + if (decl.is_exported) { + const export_src = src; // TODO point to the export token + // The scope needs to have the decl in it. + try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); } + + return type_changed; } /// Returns the depender's index of the dependee. 
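// [Editor's note] A small illustrative sketch (not part of the patch) of the
// ownership test that drives the branch restructured above: a function value
// remembers the Decl that owns its body, and only that Decl queues body
// codegen; any other Decl referring to the same value is treated as a plain
// constant, e.g. a function pointer alias. All names are hypothetical
// stand-ins for the real Module types.
const std = @import("std");

const Decl = struct { name: []const u8 };
const FnVal = struct { owner_decl: *const Decl };

const WorkItem = enum { codegen_func, codegen_decl };

/// Decide which work-queue item a Decl holding `func` should produce.
fn classify(decl: *const Decl, func: *const FnVal) WorkItem {
    const owns_tv = func.owner_decl == decl;
    return if (owns_tv) .codegen_func else .codegen_decl;
}

test "only the owning Decl lowers the function body" {
    const a = Decl{ .name = "a" };
    const b = Decl{ .name = "b" }; // e.g. an alias that refers to the same function value
    const func = FnVal{ .owner_decl = &a };
    std.debug.assert(classify(&a, &func) == .codegen_func);
    std.debug.assert(classify(&b, &func) == .codegen_decl);
}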
-- cgit v1.2.3 From 1097b0ec77d421225250d981704aca6a617bd6b3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 18:51:40 -0700 Subject: codegen: fix lowering of AIR return instruction It incorrectly did not process the death of its operand. Additionally: * delete dead code accidentally introduced in fe14e339458a578657f3890f00d654a15c84422c * improve AIR printing code to include liveness data for operands. Now an exclamation point ("!") indicates the tombstone of an AIR instruction. --- src/Compilation.zig | 59 ++-------------------------- src/codegen.zig | 18 ++++----- src/print_air.zig | 109 ++++++++++++++++++++++++++++++++++++++-------------- 3 files changed, 91 insertions(+), 95 deletions(-) (limited to 'src/Compilation.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index ea484c2d15..78d03d4534 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2007,59 +2007,6 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; assert(decl.has_tv); - if (decl.val.castTag(.function)) |payload| { - if (decl.owns_tv) { - const func = payload.data; - - var air = switch (func.state) { - .sema_failure, .dependency_failure => continue, - .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { - error.AnalysisFail => { - assert(func.state != .in_progress); - continue; - }, - error.OutOfMemory => return error.OutOfMemory, - }, - .in_progress => unreachable, - .inline_only => unreachable, // don't queue work for this - .success => unreachable, // don't queue it twice - }; - defer air.deinit(gpa); - - log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); - defer liveness.deinit(gpa); - - if (builtin.mode == .Debug and self.verbose_air) { - std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); - @import("print_air.zig").dump(gpa, air, liveness); - std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); - } - - assert(decl.ty.hasCodeGenBits()); - - self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - continue; - }, - else => { - try module.failed_decls.ensureUnusedCapacity(gpa, 1); - module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - gpa, - decl.srcLoc(), - "unable to codegen: {s}", - .{@errorName(err)}, - )); - decl.analysis = .codegen_failure_retryable; - continue; - }, - }; - continue; - } - } - assert(decl.ty.hasCodeGenBits()); self.bin_file.updateDecl(module, decl) catch |err| switch (err) { @@ -2069,7 +2016,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor continue; }, else => { - try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureUnusedCapacity(gpa, 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( gpa, decl.srcLoc(), @@ -2123,7 +2070,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor if (builtin.mode == .Debug and self.verbose_air) { std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); - @import("print_air.zig").dump(gpa, air, liveness); + @import("print_air.zig").dump(gpa, air, decl.namespace.file_scope.zir, liveness); std.debug.print("# End Function AIR: 
{s}:\n", .{decl.name}); } @@ -2207,7 +2154,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; self.bin_file.updateDeclLineNumber(module, decl) catch |err| { - try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureUnusedCapacity(gpa, 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( gpa, decl.srcLoc(), diff --git a/src/codegen.zig b/src/codegen.zig index fa096bc13f..84a47a70ac 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -481,7 +481,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn finishAir(bt: *BigTomb, result: MCValue) void { const is_used = !bt.function.liveness.isUnused(bt.inst); if (is_used) { - log.debug("{} => {}", .{ bt.inst, result }); + log.debug("%{d} => {}", .{ bt.inst, result }); const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1]; branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result); } @@ -871,12 +871,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // zig fmt: on } if (std.debug.runtime_safety) { - if (self.air_bookkeeping != old_air_bookkeeping + 1) { - std.debug.panic( - \\in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. - \\Look for a missing call to finishAir or an extra call to it. - \\ - , .{ inst, air_tags[inst] }); + if (self.air_bookkeeping < old_air_bookkeeping + 1) { + std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[inst] }); } } } @@ -963,7 +959,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } const is_used = @truncate(u1, tomb_bits) == 0; if (is_used) { - log.debug("{} => {}", .{ inst, result }); + log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; branch.inst_table.putAssumeCapacityNoClobber(inst, result); } @@ -1350,10 +1346,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.register_manager.registers[index] = inst; } } - log.debug("reusing {} => {}", .{ reg, inst }); + log.debug("%{d} => {} (reused)", .{ inst, reg }); }, .stack_offset => |off| { - log.debug("reusing stack offset {} => {}", .{ off, inst }); + log.debug("%{d} => stack offset {d} (reused)", .{ inst, off }); }, else => return false, } @@ -2852,7 +2848,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); try self.ret(operand); - return self.finishAirBookkeeping(); + return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { diff --git a/src/print_air.zig b/src/print_air.zig index 51f0ce4f49..21288ebff9 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -4,10 +4,11 @@ const fmtIntSizeBin = std.fmt.fmtIntSizeBin; const Module = @import("Module.zig"); const Value = @import("value.zig").Value; +const Zir = @import("Zir.zig"); const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); -pub fn dump(gpa: *Allocator, air: Air, liveness: Liveness) void { +pub fn dump(gpa: *Allocator, air: Air, zir: Zir, liveness: Liveness) void { const instruction_bytes = air.instructions.len * // Here we don't use @sizeOf(Air.Inst.Data) 
because it would include // the debug safety tag but we want to measure release size. @@ -51,11 +52,13 @@ pub fn dump(gpa: *Allocator, air: Air, liveness: Liveness) void { .gpa = gpa, .arena = &arena.allocator, .air = air, + .zir = zir, .liveness = liveness, - .indent = 0, + .indent = 2, }; const stream = std.io.getStdErr().writer(); writer.writeAllConstants(stream) catch return; + stream.writeByte('\n') catch return; writer.writeBody(stream, air.getMainBody()) catch return; } @@ -63,6 +66,7 @@ const Writer = struct { gpa: *Allocator, arena: *Allocator, air: Air, + zir: Zir, liveness: Liveness, indent: usize, @@ -84,13 +88,13 @@ const Writer = struct { fn writeBody(w: *Writer, s: anytype, body: []const Air.Inst.Index) @TypeOf(s).Error!void { for (body) |inst| { try s.writeByteNTimes(' ', w.indent); - try s.print("%{d} ", .{inst}); - try w.writeInst(s, inst); if (w.liveness.isUnused(inst)) { - try s.writeAll(") unused\n"); + try s.print("%{d}!", .{inst}); } else { - try s.writeAll(")\n"); + try s.print("%{d} ", .{inst}); } + try w.writeInst(s, inst); + try s.writeAll(")\n"); } } @@ -176,21 +180,21 @@ const Writer = struct { } fn writeTyStr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const ty_str = w.air.instructions.items(.data)[inst].ty_str; + const name = w.zir.nullTerminatedString(ty_str.str); + try s.print("\"{}\", {}", .{ std.zig.fmtEscapes(name), ty_str.ty }); } fn writeBinOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const bin_op = w.air.instructions.items(.data)[inst].bin_op; - try w.writeInstRef(s, bin_op.lhs); + try w.writeOperand(s, inst, 0, bin_op.lhs); try s.writeAll(", "); - try w.writeInstRef(s, bin_op.rhs); + try w.writeOperand(s, inst, 1, bin_op.rhs); } fn writeUnOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const un_op = w.air.instructions.items(.data)[inst].un_op; - try w.writeInstRef(s, un_op); + try w.writeOperand(s, inst, 0, un_op); } fn writeNoOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -208,7 +212,7 @@ const Writer = struct { fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_op = w.air.instructions.items(.data)[inst].ty_op; try s.print("{}, ", .{w.air.getRefType(ty_op.ty)}); - try w.writeInstRef(s, ty_op.operand); + try w.writeOperand(s, inst, 0, ty_op.operand); } fn writeBlock(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -229,7 +233,7 @@ const Writer = struct { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const extra = w.air.extraData(Air.StructField, ty_pl.payload); - try w.writeInstRef(s, extra.data.struct_ptr); + try w.writeOperand(s, inst, 0, extra.data.struct_ptr); try s.print(", {d}", .{extra.data.field_index}); } @@ -259,21 +263,21 @@ const Writer = struct { fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Call, pl_op.payload); - const args = w.air.extra[extra.end..][0..extra.data.args_len]; - try w.writeInstRef(s, pl_op.operand); + const args = @bitCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]); + try w.writeOperand(s, inst, 0, pl_op.operand); try s.writeAll(", ["); for (args) |arg, i| { if (i != 0) try s.writeAll(", "); - try w.writeInstRef(s, @intToEnum(Air.Inst.Ref, arg)); + try w.writeOperand(s, inst, 1 + i, arg); } try s.writeAll("]"); } fn writeBr(w: 
*Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const br = w.air.instructions.items(.data)[inst].br; - try w.writeInstIndex(s, br.block_inst); + try w.writeInstIndex(s, br.block_inst, false); try s.writeAll(", "); - try w.writeInstRef(s, br.operand); + try w.writeOperand(s, inst, 0, br.operand); } fn writeCondBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -281,16 +285,35 @@ const Writer = struct { const extra = w.air.extraData(Air.CondBr, pl_op.payload); const then_body = w.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = w.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + const liveness_condbr = w.liveness.getCondBr(inst); - try w.writeInstRef(s, pl_op.operand); + try w.writeOperand(s, inst, 0, pl_op.operand); try s.writeAll(", {\n"); const old_indent = w.indent; w.indent += 2; + if (liveness_condbr.then_deaths.len != 0) { + try s.writeByteNTimes(' ', w.indent); + for (liveness_condbr.then_deaths) |operand, i| { + if (i != 0) try s.writeAll(" "); + try s.print("%{d}!", .{operand}); + } + try s.writeAll("\n"); + } + try w.writeBody(s, then_body); try s.writeByteNTimes(' ', old_indent); try s.writeAll("}, {\n"); + if (liveness_condbr.else_deaths.len != 0) { + try s.writeByteNTimes(' ', w.indent); + for (liveness_condbr.else_deaths) |operand, i| { + if (i != 0) try s.writeAll(" "); + try s.print("%{d}!", .{operand}); + } + try s.writeAll("\n"); + } + try w.writeBody(s, else_body); w.indent = old_indent; @@ -304,7 +327,7 @@ const Writer = struct { var extra_index: usize = switch_br.end; var case_i: u32 = 0; - try w.writeInstRef(s, pl_op.operand); + try w.writeOperand(s, inst, 0, pl_op.operand); const old_indent = w.indent; w.indent += 2; @@ -317,7 +340,7 @@ const Writer = struct { try s.writeAll(", ["); for (items) |item, item_i| { if (item_i != 0) try s.writeAll(", "); - try w.writeInstRef(s, item); + try w.writeInstRef(s, item, false); } try s.writeAll("] => {\n"); w.indent += 2; @@ -342,19 +365,49 @@ const Writer = struct { try s.writeAll("}"); } - fn writeInstRef(w: *Writer, s: anytype, inst: Air.Inst.Ref) @TypeOf(s).Error!void { - var i: usize = @enumToInt(inst); + fn writeOperand( + w: *Writer, + s: anytype, + inst: Air.Inst.Index, + op_index: usize, + operand: Air.Inst.Ref, + ) @TypeOf(s).Error!void { + const dies = if (op_index < Liveness.bpi - 1) + w.liveness.operandDies(inst, @intCast(Liveness.OperandInt, op_index)) + else blk: { + // TODO + break :blk false; + }; + return w.writeInstRef(s, operand, dies); + } + + fn writeInstRef( + w: *Writer, + s: anytype, + operand: Air.Inst.Ref, + dies: bool, + ) @TypeOf(s).Error!void { + var i: usize = @enumToInt(operand); if (i < Air.Inst.Ref.typed_value_map.len) { - return s.print("@{}", .{inst}); + return s.print("@{}", .{operand}); } i -= Air.Inst.Ref.typed_value_map.len; - return w.writeInstIndex(s, @intCast(Air.Inst.Index, i)); + return w.writeInstIndex(s, @intCast(Air.Inst.Index, i), dies); } - fn writeInstIndex(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + fn writeInstIndex( + w: *Writer, + s: anytype, + inst: Air.Inst.Index, + dies: bool, + ) @TypeOf(s).Error!void { _ = w; - return s.print("%{d}", .{inst}); + if (dies) { + try s.print("%{d}!", .{inst}); + } else { + try s.print("%{d}", .{inst}); + } } }; -- cgit v1.2.3
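// [Editor's note] A standalone sketch (not part of the patch) of the operand
// "tombstone" notation the printer adopts above: an operand whose lifetime
// ends at an instruction is rendered with a trailing '!'. The packed death
// bits and `operandDies` helper here are simplified, hypothetical stand-ins
// for the real Liveness encoding.
const std = @import("std");

const ExampleLiveness = struct {
    /// One bit per operand of a single instruction; a set bit means that
    /// operand's lifetime ends at this instruction.
    death_bits: u8,

    fn operandDies(l: ExampleLiveness, op_index: u3) bool {
        return (l.death_bits >> op_index) & 1 != 0;
    }
};

fn writeOperand(writer: anytype, operand: u32, dies: bool) !void {
    if (dies) {
        try writer.print("%{d}!", .{operand});
    } else {
        try writer.print("%{d}", .{operand});
    }
}

pub fn main() !void {
    const stderr = std.io.getStdErr().writer();
    const live = ExampleLiveness{ .death_bits = 0b01 }; // only operand 0 dies here
    try writeOperand(stderr, 4, live.operandDies(0));
    try stderr.writeAll(", ");
    try writeOperand(stderr, 5, live.operandDies(1));
    try stderr.writeAll(")\n"); // prints: %4!, %5)
}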