From 5d6f7b44c19b064a543b0c1eecb6ef5c671b612e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jul 2021 20:42:47 -0700 Subject: stage2: rework AIR memory layout This commit changes the AIR file and the documentation of the memory layout. The actual work of modifying the surrounding code (in Sema and codegen) is not yet done. --- src/Module.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index a1f6887fbd..2f1dc0b33b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -21,7 +21,7 @@ const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); -const ir = @import("air.zig"); +const Air = @import("Air.zig"); const Zir = @import("Zir.zig"); const trace = @import("tracy.zig").trace; const AstGen = @import("AstGen.zig"); -- cgit v1.2.3 From ef7080aed1a1a4dc54cb837938e462b4e6720734 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Jul 2021 16:32:11 -0700 Subject: stage2: update Liveness, SPIR-V for new AIR memory layout also do the inline assembly instruction --- BRANCH_TODO | 44 ---- src/Air.zig | 60 ++++-- src/Compilation.zig | 57 +++-- src/Liveness.zig | 1 + src/Module.zig | 36 +++- src/Sema.zig | 563 +++++++++++++++++++++++++------------------------- src/codegen/spirv.zig | 411 ++++++++++++++++++------------------ 7 files changed, 595 insertions(+), 577 deletions(-) (limited to 'src/Module.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index 5bc4d2a2f5..3b946edbbd 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,24 +1,6 @@ * be sure to test debug info of parameters - /// Each bit represents the index of an `Inst` parameter in the `args` field. - /// If a bit is set, it marks the end of the lifetime of the corresponding - /// instruction parameter. 
For example, 0b101 means that the first and - /// third `Inst` parameters' lifetimes end after this instruction, and will - /// not have any more following references. - /// The most significant bit being set means that the instruction itself is - /// never referenced, in other words its lifetime ends as soon as it finishes. - /// If bit 15 (0b1xxx_xxxx_xxxx_xxxx) is set, it means this instruction itself is unreferenced. - /// If bit 14 (0bx1xx_xxxx_xxxx_xxxx) is set, it means this is a special case and the - /// lifetimes of operands are encoded elsewhere. - deaths: DeathsInt = undefined, - - - pub const DeathsInt = u16; - pub const DeathsBitIndex = std.math.Log2Int(DeathsInt); - pub const unreferenced_bit_index = @typeInfo(DeathsInt).Int.bits - 1; - pub const deaths_bits = unreferenced_bit_index - 1; - pub fn isUnused(self: Inst) bool { return (self.deaths & (1 << unreferenced_bit_index)) != 0; } @@ -115,32 +97,6 @@ - pub const Assembly = struct { - pub const base_tag = Tag.assembly; - - base: Inst, - asm_source: []const u8, - is_volatile: bool, - output_constraint: ?[]const u8, - inputs: []const []const u8, - clobbers: []const []const u8, - args: []const *Inst, - - pub fn operandCount(self: *const Assembly) usize { - return self.args.len; - } - pub fn getOperand(self: *const Assembly, index: usize) ?*Inst { - if (index < self.args.len) - return self.args[index]; - return null; - } - }; - - pub const StructFieldPtr = struct { - struct_ptr: *Inst, - field_index: usize, - }; - /// For debugging purposes, prints a function representation to stderr. pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void { diff --git a/src/Air.zig b/src/Air.zig index c57232fba0..112845559d 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1,5 +1,7 @@ //! Analyzed Intermediate Representation. -//! Sema inputs ZIR and outputs AIR. +//! This data is produced by Sema and consumed by codegen. +//! Unlike ZIR where there is one instance for an entire source file, each function +//! 
gets its own `Air` instance. const std = @import("std"); const Value = @import("value.zig").Value; @@ -27,38 +29,48 @@ pub const Inst = struct { data: Data, pub const Tag = enum(u8) { + /// The first N instructions in Air must be one arg instruction per function parameter. + /// Uses the `ty` field. + arg, /// Float or integer addition. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. add, /// Integer addition. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. addwrap, /// Float or integer subtraction. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. sub, /// Integer subtraction. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. subwrap, /// Float or integer multiplication. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. mul, /// Integer multiplication. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. mulwrap, /// Integer or float division. 
For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. div, /// Allocates stack local memory. /// Uses the `ty` field. alloc, - /// TODO + /// Inline assembly. Uses the `ty_pl` field. Payload is `Asm`. assembly, /// Bitwise AND. `&`. /// Result type is the same as both operands. @@ -80,7 +92,7 @@ pub const Inst = struct { /// Uses the `ty_pl` field with payload `Block`. block, /// Return from a block with a result. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `br` field. br, /// Lowers to a hardware trap instruction, or the next best thing. @@ -109,11 +121,11 @@ pub const Inst = struct { /// Uses the `bin_op` field. cmp_neq, /// Conditional branch. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`. cond_br, /// Switch branch. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`. switch_br, /// A comptime-known value. Uses the `ty_pl` field, payload is index of @@ -166,7 +178,7 @@ pub const Inst = struct { load, /// A labeled block of code that loops forever. At the end of the body it is implied /// to repeat; no explicit "repeat" instruction terminates loop bodies. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `ty_pl` field. Payload is `Block`. loop, /// Converts a pointer to its address. Result type is always `usize`. @@ -178,7 +190,7 @@ pub const Inst = struct { /// Uses the `ty_op` field. ref, /// Return a value from a function. 
- /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `un_op` field. ret, /// Returns a pointer to a global variable. @@ -189,7 +201,7 @@ pub const Inst = struct { /// Uses the `bin_op` field. store, /// Indicates the program counter will never get to this instruction. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. unreach, /// Convert from one float type to another. /// Uses the `ty_op` field. @@ -343,6 +355,16 @@ pub const StructField = struct { field_index: u32, }; +/// Trailing: +/// 0. `Ref` for every outputs_len +/// 1. `Ref` for every inputs_len +pub const Asm = struct { + /// Index to the corresponding ZIR instruction. + /// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and + /// clobbers are found via here. + zir_index: u32, +}; + pub fn getMainBody(air: Air) []const Air.Inst.Index { const body_index = air.extra[@enumToInt(ExtraIndex.main_block)]; const body_len = air.extra[body_index]; @@ -369,3 +391,11 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end .end = i, }; } + +pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void { + air.instructions.deinit(gpa); + gpa.free(air.extra); + gpa.free(air.values); + gpa.free(air.variables); + air.* = undefined; +} diff --git a/src/Compilation.zig b/src/Compilation.zig index b9055eceed..74ad7b2aae 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -13,7 +13,7 @@ const target_util = @import("target.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const trace = @import("tracy.zig").trace; -const liveness = @import("liveness.zig"); +const Liveness = @import("Liveness.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const glibc = @import("glibc.zig"); @@ -1922,6 +1922,7 @@ pub fn 
getCompileLogOutput(self: *Compilation) []const u8 { } pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemory }!void { + const gpa = self.gpa; // If the terminal is dumb, we dont want to show the user all the // output. var progress: std.Progress = .{ .dont_print_on_dumb = true }; @@ -2005,7 +2006,8 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor assert(decl.has_tv); if (decl.val.castTag(.function)) |payload| { const func = payload.data; - switch (func.state) { + + var air = switch (func.state) { .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { error.AnalysisFail => { assert(func.state != .in_progress); @@ -2016,18 +2018,39 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .in_progress => unreachable, .inline_only => unreachable, // don't queue work for this .sema_failure, .dependency_failure => continue, - .success => {}, - } - // Here we tack on additional allocations to the Decl's arena. The allocations - // are lifetime annotations in the ZIR. 
- var decl_arena = decl.value_arena.?.promote(module.gpa); - defer decl.value_arena.?.* = decl_arena.state; + .success => unreachable, // don't queue it twice + }; + defer air.deinit(gpa); + log.debug("analyze liveness of {s}", .{decl.name}); - try liveness.analyze(module.gpa, &decl_arena.allocator, func.body); + var liveness = try Liveness.analyze(gpa, air); + defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { func.dump(module.*); } + + assert(decl.ty.hasCodeGenBits()); + + self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + continue; + }, + else => { + try module.failed_decls.ensureUnusedCapacity(gpa, 1); + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + continue; + }, + }; + continue; } assert(decl.ty.hasCodeGenBits()); @@ -2039,9 +2062,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor continue; }, else => { - try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - module.gpa, + gpa, decl.srcLoc(), "unable to codegen: {s}", .{@errorName(err)}, @@ -2070,7 +2093,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; const emit_h = module.emit_h.?; - _ = try emit_h.decl_table.getOrPut(module.gpa, decl); + _ = try emit_h.decl_table.getOrPut(gpa, decl); const decl_emit_h = decl.getEmitH(module); const fwd_decl = &decl_emit_h.fwd_decl; 
fwd_decl.shrinkRetainingCapacity(0); @@ -2079,7 +2102,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .module = module, .error_msg = null, .decl = decl, - .fwd_decl = fwd_decl.toManaged(module.gpa), + .fwd_decl = fwd_decl.toManaged(gpa), // we don't want to emit optionals and error unions to headers since they have no ABI .typedefs = undefined, }; @@ -2087,14 +2110,14 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor c_codegen.genHeader(&dg) catch |err| switch (err) { error.AnalysisFail => { - try emit_h.failed_decls.put(module.gpa, decl, dg.error_msg.?); + try emit_h.failed_decls.put(gpa, decl, dg.error_msg.?); continue; }, else => |e| return e, }; fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); - fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); + fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); }, }, .analyze_decl => |decl| { @@ -2111,9 +2134,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; self.bin_file.updateDeclLineNumber(module, decl) catch |err| { - try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - module.gpa, + gpa, decl.srcLoc(), "unable to update line number: {s}", .{@errorName(err)}, diff --git a/src/Liveness.zig b/src/Liveness.zig index 828614dcbb..84e2495054 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -150,6 +150,7 @@ fn analyzeInst( const gpa = a.gpa; const table = &a.table; const inst_tags = a.air.instructions.items(.tag); + const inst_datas = a.air.instructions.items(.data); // No tombstone for this instruction means it is never referenced, // and its birth marks its own death. 
Very metal 🤘 diff --git a/src/Module.zig b/src/Module.zig index 2f1dc0b33b..6273243ee2 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -739,8 +739,6 @@ pub const Union = struct { pub const Fn = struct { /// The Decl that corresponds to the function itself. owner_decl: *Decl, - /// undefined unless analysis state is `success`. - body: ir.Body, /// The ZIR instruction that is a function instruction. Use this to find /// the body. We store this rather than the body directly so that when ZIR /// is regenerated on update(), we can map this to the new corresponding @@ -3585,17 +3583,19 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { mod.gpa.free(kv.value); } -pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { +pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { const tracy = trace(@src()); defer tracy.end(); + const gpa = mod.gpa; + // Use the Decl's arena for function memory. - var arena = decl.value_arena.?.promote(mod.gpa); + var arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try mod.gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); - defer mod.gpa.free(param_inst_list); + const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); + defer gpa.free(param_inst_list); for (param_inst_list) |*param_inst, param_index| { const param_type = fn_ty.fnParamType(param_index); @@ -3615,7 +3615,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { var sema: Sema = .{ .mod = mod, - .gpa = mod.gpa, + .gpa = gpa, .arena = &arena.allocator, .code = zir, .owner_decl = decl, @@ -3626,6 +3626,11 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { }; defer sema.deinit(); + // First few indexes of extra are reserved and set at the end. 
+ const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; + try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); + sema.air_extra.items.len += reserved_count; + var inner_block: Scope.Block = .{ .parent = null, .sema = &sema, @@ -3634,20 +3639,29 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { .inlining = null, .is_comptime = false, }; - defer inner_block.instructions.deinit(mod.gpa); + defer inner_block.instructions.deinit(gpa); // AIR currently requires the arg parameters to be the first N instructions - try inner_block.instructions.appendSlice(mod.gpa, param_inst_list); + try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); try sema.analyzeFnBody(&inner_block, func.zir_body_inst); - const instructions = try arena.allocator.dupe(*ir.Inst, inner_block.instructions.items); + // Copy the block into place and mark that as the main block. + sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = sema.air_extra.items.len; + try sema.air_extra.appendSlice(inner_block.instructions.items); + func.state = .success; - func.body = .{ .instructions = instructions }; log.debug("set {s} to success", .{decl.name}); + + return Air{ + .instructions = sema.air_instructions.toOwnedSlice(), + .extra = sema.air_extra.toOwnedSlice(), + .values = sema.air_values.toOwnedSlice(), + .variables = sema.air_variables.toOwnedSlice(), + }; } fn markOutdatedDecl(mod: *Module, decl: *Decl) !void { diff --git a/src/Sema.zig b/src/Sema.zig index 85cb4aa423..b4e10837af 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1,6 +1,6 @@ //! Semantic analysis of ZIR instructions. //! Shared to every Block. Stored on the stack. -//! State used for compiling a `Zir` into AIR. +//! State used for compiling a ZIR into AIR. //! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions. //! 
Does type checking, comptime control flow, and safety-check generation. //! This is the the heart of the Zig compiler. @@ -11,6 +11,10 @@ gpa: *Allocator, /// Points to the arena allocator of the Decl. arena: *Allocator, code: Zir, +air_instructions: std.MultiArrayList(Air.Inst) = .{}, +air_extra: ArrayListUnmanaged(u32) = .{}, +air_values: ArrayListUnmanaged(Value) = .{}, +air_variables: ArrayListUnmanaged(Module.Var) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -32,7 +36,7 @@ func: ?*Module.Fn, /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function, /// > otherwise it is the number of parameters of the function. /// > param_count: u32 -param_inst_list: []const *ir.Inst, +param_inst_list: []const Air.Inst.Index, branch_quota: u32 = 1000, branch_count: u32 = 0, /// This field is updated when a new source location becomes active, so that @@ -65,10 +69,15 @@ const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); const target_util = @import("target.zig"); -pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, *ir.Inst); +pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index); pub fn deinit(sema: *Sema) void { - sema.inst_map.deinit(sema.gpa); + const gpa = sema.gpa; + sema.air_instructions.deinit(gpa); + sema.air_extra.deinit(gpa); + sema.air_values.deinit(gpa); + sema.air_variables.deinit(gpa); + sema.inst_map.deinit(gpa); sema.* = undefined; } @@ -108,7 +117,7 @@ pub fn analyzeFnBody( /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. 
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!*Inst { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -533,7 +542,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -569,7 +578,7 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } /// TODO when we rework AIR memory layout, this function will no longer have a possible error. -pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!*ir.Inst { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. 
@@ -618,19 +627,19 @@ pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Z return sema.resolveAirAsType(block, src, air_inst); } -fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: *ir.Inst) !Type { +fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type { const wanted_type = Type.initTag(.@"type"); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toType(sema.arena); } -fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !Value { +fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !Value { return (try sema.resolveDefinedValue(block, src, base)) orelse return sema.failWithNeededComptime(block, src); } -fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !?Value { +fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value { if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); @@ -644,7 +653,7 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - base: *ir.Inst, + base: Air.Inst.Index, ) !?Value { if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| { return opv; @@ -708,13 +717,13 @@ pub fn resolveInstConst( }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!*Inst { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -749,7 +758,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -820,7 +829,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1017,7 +1026,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1081,7 +1090,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1101,7 +1110,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1141,7 +1150,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1153,7 +1162,7 @@ fn zirRetPtr( return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1166,7 +1175,7 @@ fn zirRetType( 
sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1191,7 +1200,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I fn ensureResultUsed( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, src: LazySrcLoc, ) InnerError!void { switch (operand.ty.zigTypeTag()) { @@ -1213,7 +1222,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1247,7 +1256,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1269,13 +1278,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1298,13 +1307,13 @@ fn 
zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1317,7 +1326,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!* return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1336,7 +1345,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1589,7 +1598,7 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1625,7 +1634,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, src, param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { 
+fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1653,7 +1662,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1662,7 +1671,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1680,7 +1689,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
}); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1693,7 +1702,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!* }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1722,7 +1731,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1772,7 +1781,7 @@ fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Z return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1832,12 +1841,12 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE // Loop repetition is implied so the last instruction may or may not be a noreturn instruction. 
try child_block.instructions.append(sema.gpa, &loop_inst.base); - loop_inst.body = .{ .instructions = try sema.arena.dupe(*Inst, loop_block.instructions.items) }; + loop_inst.body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, loop_block.instructions.items) }; return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1847,13 +1856,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1911,7 +1920,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1922,7 +1931,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1933,7 +1942,7 @@ fn analyzeBlockBody( 
if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. - const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items); + const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items); try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); return copied_instructions[copied_instructions.len - 1]; } @@ -1944,7 +1953,7 @@ fn analyzeBlockBody( if (br_block == merges.block_inst) { // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. - const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items[0..last_inst_index]); + const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]); try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); return merges.results.items[0]; } @@ -1959,7 +1968,7 @@ fn analyzeBlockBody( const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items); merges.block_inst.base.ty = resolved_ty; merges.block_inst.body = .{ - .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), }; // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. 
@@ -1991,7 +2000,7 @@ fn analyzeBlockBody( }, .block = merges.block_inst, .body = .{ - .instructions = try sema.arena.dupe(*Inst, coerce_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, coerce_block.instructions.items), }, }; } @@ -2130,7 +2139,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2138,7 +2147,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2192,7 +2201,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2204,7 +2213,7 @@ fn zirCall( const func = try sema.resolveInst(extra.data.callee); // TODO handle function calls of generic functions - const resolved_args = try sema.arena.alloc(*Inst, args.len); + const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len); for (args) |zir_arg, i| { // the args are already casted to the result of a param type instruction. 
resolved_args[i] = try sema.resolveInst(zir_arg); @@ -2216,13 +2225,13 @@ fn zirCall( fn analyzeCall( sema: *Sema, block: *Scope.Block, - func: *ir.Inst, + func: Air.Inst.Index, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - args: []const *ir.Inst, -) InnerError!*ir.Inst { + args: []const Air.Inst.Index, +) InnerError!Air.Inst.Index { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2279,7 +2288,7 @@ fn analyzeCall( const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or func.ty.fnCallingConvention() == .Inline; - const result: *Inst = if (is_inline_call) res: { + const result: Air.Inst.Index = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, @@ -2377,7 +2386,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2389,7 +2398,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2401,7 +2410,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemType(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2409,7 +2418,7 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.constType(sema.arena, src, elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -2424,7 +2433,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2437,7 +2446,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2452,7 +2461,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = 
trace(@src()); defer tracy.end(); @@ -2465,7 +2474,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2486,7 +2495,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.constType(sema.arena, src, err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2505,7 +2514,7 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2535,7 +2544,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, result_ty, .bitcast, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2568,7 +2577,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { 
const tracy = trace(@src()); defer tracy.end(); @@ -2658,7 +2667,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2672,7 +2681,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; @@ -2680,7 +2689,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); - const enum_tag: *Inst = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand.ty.unionHasTag()) { @@ -2754,7 +2763,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2815,7 +2824,7 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2858,7 +2867,7 @@ fn zirOptionalPayload( block: *Scope.Block, inst: 
Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2896,7 +2905,7 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2930,7 +2939,7 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2969,7 +2978,7 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2995,7 +3004,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3042,7 +3051,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3093,7 +3102,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3234,7 +3243,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { 
+fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3242,7 +3251,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Ins return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3258,13 +3267,13 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const dest_type = try sema.resolveType(block, src, zir_dest_type); const operand = try sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3281,7 +3290,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, ty, .ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3299,7 +3308,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3312,7 +3321,7 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, 
inst: Zir.Inst.Index) InnerErro return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3327,7 +3336,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3340,7 +3349,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3383,7 +3392,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3396,7 +3405,7 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.bitcast(block, dest_type, operand); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); 
defer tracy.end(); @@ -3439,7 +3448,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3454,7 +3463,7 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3472,7 +3481,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3482,7 +3491,7 @@ fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3495,7 +3504,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSliceStart(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3508,7 +3517,7 @@ fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3522,7 +3531,7 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3544,7 +3553,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3563,7 +3572,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3582,7 +3591,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3615,7 +3624,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3645,14 +3654,14 @@ fn zirSwitchBlockMulti( fn analyzeSwitch( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, extra_end: usize, special_prong: Zir.SpecialProng, 
scalar_cases_len: usize, multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const gpa = sema.gpa; const mod = sema.mod; @@ -4187,7 +4196,7 @@ fn analyzeSwitch( cases[scalar_i] = .{ .item = item_val, - .body = .{ .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items) }, + .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) }, }; } @@ -4207,7 +4216,7 @@ fn analyzeSwitch( case_block.instructions.shrinkRetainingCapacity(0); - var any_ok: ?*Inst = null; + var any_ok: ?Air.Inst.Index = null; const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { @@ -4280,7 +4289,7 @@ fn analyzeSwitch( try case_block.instructions.append(gpa, &new_condbr.base); const cond_body: Body = .{ - .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), }; case_block.instructions.shrinkRetainingCapacity(0); @@ -4288,7 +4297,7 @@ fn analyzeSwitch( extra_index += body_len; _ = try sema.analyzeBody(&case_block, body); new_condbr.then_body = .{ - .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), }; if (prev_condbr) |condbr| { condbr.else_body = cond_body; @@ -4303,7 +4312,7 @@ fn analyzeSwitch( case_block.instructions.shrinkRetainingCapacity(0); _ = try sema.analyzeBody(&case_block, special.body); const else_body: Body = .{ - .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), }; if (prev_condbr) |condbr| { condbr.else_body = else_body; @@ -4507,7 +4516,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { 
+fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4516,7 +4525,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -4541,7 +4550,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return mod.constBool(arena, src, false); } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4566,13 +4575,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return mod.constType(sema.arena, src, file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4581,7 +4590,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4594,7 +4603,7 @@ fn zirBitwise( block: *Scope.Block, inst: Zir.Inst.Index, ir_tag: ir.Inst.Tag, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4606,7 +4615,7 @@ fn zirBitwise( const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); - const instructions = &[_]*Inst{ lhs, rhs }; + const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -4652,7 +4661,7 @@ fn zirBitwise( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer 
tracy.end(); @@ -4660,7 +4669,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4668,7 +4677,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4681,7 +4690,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4695,7 +4704,7 @@ fn zirNegate( return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4715,7 +4724,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4729,13 +4738,13 @@ fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, zir_tag: Zir.Inst.Tag, - lhs: *Inst, - rhs: *Inst, + lhs: Air.Inst.Index, + rhs: Air.Inst.Index, src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!*Inst { - const instructions = &[_]*Inst{ lhs, rhs }; +) InnerError!Air.Inst.Index { + const 
instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -4844,7 +4853,7 @@ fn analyzeArithmetic( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4859,7 +4868,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4899,7 +4908,7 @@ fn zirAsm( }; }; - const args = try sema.arena.alloc(*Inst, inputs_len); + const args = try sema.arena.alloc(Air.Inst.Index, inputs_len); const inputs = try sema.arena.alloc([]const u8, inputs_len); for (args) |*arg, arg_i| { @@ -4943,7 +4952,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5009,7 +5018,7 @@ fn zirCmp( return mod.constBool(sema.arena, src, lhs.value().?.eql(rhs.value().?) 
== (op == .eq)); } - const instructions = &[_]*Inst{ lhs, rhs }; + const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); if (!resolved_type.isSelfComparable(is_equality_cmp)) { return mod.fail(&block.base, src, "operator not allowed for type '{}'", .{resolved_type}); @@ -5041,7 +5050,7 @@ fn zirCmp( return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5051,7 +5060,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5065,7 +5074,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5074,7 +5083,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: 
implement Sema.zirRetAddr", .{}); } @@ -5083,12 +5092,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5131,7 +5140,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; @@ -5140,7 +5149,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.constType(sema.arena, src, operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -5149,13 +5158,13 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5165,7 +5174,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5173,7 +5182,7 @@ fn zirTypeofPeer( const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; const args = sema.code.refSlice(extra.end, extended.small); - const inst_list = try sema.gpa.alloc(*ir.Inst, args.len); + const inst_list = try sema.gpa.alloc(Air.Inst.Index, args.len); defer sema.gpa.free(inst_list); for (args) |arg_ref, i| { @@ -5184,7 +5193,7 @@ fn zirTypeofPeer( return sema.mod.constType(sema.arena, src, result_type); } -fn zirBoolNot(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5206,7 +5215,7 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, comptime is_bool_or: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5237,7 +5246,7 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5292,12 +5301,12 @@ fn zirBoolBr( const rhs_result = try sema.resolveBody(rhs_block, body); _ = try rhs_block.addBr(src, block_inst, rhs_result); - const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, then_block.instructions.items) }; - const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, else_block.instructions.items) }; + const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, then_block.instructions.items) }; + const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, else_block.instructions.items) }; _ = try child_block.addCondBr(src, lhs, air_then_body, air_else_body); block_inst.body = .{ - .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), }; try parent_block.instructions.append(sema.gpa, &block_inst.base); return &block_inst.base; @@ -5307,7 +5316,7 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5321,7 +5330,7 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5332,7 +5341,7 
@@ fn zirIsNonNullPtr( return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5341,7 +5350,7 @@ fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5385,14 +5394,14 @@ fn zirCondbr( _ = try sema.analyzeBody(&sub_block, then_body); const air_then_body: ir.Body = .{ - .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), }; sub_block.instructions.shrinkRetainingCapacity(0); _ = try sema.analyzeBody(&sub_block, else_body); const air_else_body: ir.Body = .{ - .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), }; _ = try parent_block.addCondBr(src, cond, air_then_body, air_else_body); @@ -5470,7 +5479,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError fn analyzeRet( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, src: LazySrcLoc, need_coercion: bool, ) InnerError!Zir.Inst.Index { @@ -5505,7 +5514,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5526,7 +5535,7 @@ fn 
zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.mod.constType(sema.arena, .unneeded, ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5580,7 +5589,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5594,13 +5603,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5622,7 +5631,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: mem.set(Zir.Inst.Index, found_fields, 0); // The init values to use for the struct instance. 
- const field_inits = try gpa.alloc(*ir.Inst, struct_obj.fields.count()); + const field_inits = try gpa.alloc(Air.Inst.Index, struct_obj.fields.count()); defer gpa.free(field_inits); var field_i: u32 = 0; @@ -5713,7 +5722,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5721,7 +5730,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5729,7 +5738,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5737,13 +5746,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!*Inst { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5765,7 +5774,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5774,7 +5783,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5783,84 +5792,84 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", 
.{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: 
Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5923,199 +5932,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, type_res, .bitcast, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!*Inst { +fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 
const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShrExact(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return 
sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn 
zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); } -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6126,7 +6135,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6138,7 +6147,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6204,7 +6213,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -6271,7 +6280,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6281,7 +6290,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return 
sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6291,7 +6300,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6301,7 +6310,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{}); @@ -6311,7 +6320,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6321,7 +6330,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6355,7 +6364,7 @@ pub const PanicId = enum { invalid_error_code, }; -fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void { +fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void { const block_inst = try sema.arena.create(Inst.Block); block_inst.* = .{ .base = .{ @@ -6364,12 +6373,12 @@ fn 
addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: .src = ok.src, }, .body = .{ - .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the condbr. + .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the condbr. }, }; const ok_body: ir.Body = .{ - .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the br_void. + .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the br_void. }; const br_void = try sema.arena.create(Inst.BrVoid); br_void.* = .{ @@ -6395,7 +6404,7 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: _ = try sema.safetyPanic(&fail_block, ok.src, panic_id); - const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, fail_block.instructions.items) }; + const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, fail_block.instructions.items) }; const condbr = try sema.arena.create(Inst.CondBr); condbr.* = .{ @@ -6417,7 +6426,7 @@ fn panicWithMsg( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - msg_inst: *ir.Inst, + msg_inst: Air.Inst.Index, ) !Zir.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6438,7 +6447,7 @@ fn panicWithMsg( .ty = try mod.optionalType(arena, ptr_stack_trace_ty), .val = Value.initTag(.null_value), }); - const args = try arena.create([2]*ir.Inst); + const args = try arena.create([2]Air.Inst.Index); args.* = .{ msg_inst, null_stack_trace }; _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args); return always_noreturn; @@ -6494,10 +6503,10 @@ fn namedFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - object_ptr: *Inst, + object_ptr: Air.Inst.Index, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6647,7 +6656,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, 
decl_name: []const u8, -) InnerError!?*Inst { +) InnerError!?Air.Inst.Index { const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6671,11 +6680,11 @@ fn analyzeStructFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - struct_ptr: *Inst, + struct_ptr: Air.Inst.Index, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6706,11 +6715,11 @@ fn analyzeUnionFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - union_ptr: *Inst, + union_ptr: Air.Inst.Index, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6743,10 +6752,10 @@ fn elemPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - elem_index: *Inst, + array_ptr: Air.Inst.Index, + elem_index: Air.Inst.Index, elem_index_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6770,10 +6779,10 @@ fn elemPtrArray( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - elem_index: *Inst, + array_ptr: Air.Inst.Index, + elem_index: Air.Inst.Index, elem_index_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { if (array_ptr.value()) |array_ptr_val| { if (elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. 
@@ -6798,9 +6807,9 @@ fn coerce( sema: *Sema, block: *Scope.Block, dest_type: Type, - inst: *Inst, + inst: Air.Inst.Index, inst_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { if (dest_type.tag() == .var_args_param) { return sema.coerceVarArgParam(block, inst); } @@ -6976,7 +6985,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) InnerError!?*Inst { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7014,7 +7023,7 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) Inn return null; } -fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: *Inst) !*Inst { +fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index { switch (inst.ty.zigTypeTag()) { .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}), else => {}, @@ -7027,8 +7036,8 @@ fn storePtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: *Inst, - uncasted_value: *Inst, + ptr: Air.Inst.Index, + uncasted_value: Air.Inst.Index, ) !void { if (ptr.ty.isConstPtr()) return sema.mod.fail(&block.base, src, "cannot assign to constant", .{}); @@ -7076,7 +7085,7 @@ fn storePtr( _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); } -fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // Keep the comptime Value representation; take the new type. 
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7086,7 +7095,7 @@ fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Ins return block.addUnOp(inst.src, dest_type, .bitcast, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7094,7 +7103,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7102,12 +7111,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7128,7 +7137,7 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl }); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!*Inst { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index { const variable = tv.val.castTag(.variable).?.data; const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); @@ -7157,8 +7166,8 @@ fn analyzeRef( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, -) InnerError!*Inst { + operand: Air.Inst.Index, +) InnerError!Air.Inst.Index { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7176,9 +7185,9 @@ fn analyzeLoad( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: *Inst, + ptr: Air.Inst.Index, ptr_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const elem_ty = switch 
(ptr.ty.zigTypeTag()) { .Pointer => ptr.ty.elemType(), else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}), @@ -7201,9 +7210,9 @@ fn analyzeIsNull( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, + operand: Air.Inst.Index, invert_logic: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7222,8 +7231,8 @@ fn analyzeIsNonErr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, -) InnerError!*Inst { + operand: Air.Inst.Index, +) InnerError!Air.Inst.Index { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); @@ -7243,12 +7252,12 @@ fn analyzeSlice( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - start: *Inst, - end_opt: ?*Inst, - sentinel_opt: ?*Inst, + array_ptr: Air.Inst.Index, + start: Air.Inst.Index, + end_opt: ?Air.Inst.Index, + sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7319,10 +7328,10 @@ fn cmpNumeric( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - lhs: *Inst, - rhs: *Inst, + lhs: Air.Inst.Index, + rhs: Air.Inst.Index, op: std.math.CompareOperator, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { assert(lhs.ty.isNumeric()); assert(rhs.ty.isNumeric()); @@ -7488,7 +7497,7 @@ fn cmpNumeric( return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } -fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn wrapOptional(sema: 
*Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); } @@ -7497,7 +7506,7 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); } -fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { const err_union = dest_type.castTag(.error_union).?; if (inst.value()) |val| { if (inst.ty.zigTypeTag() != .ErrorSet) { @@ -7568,7 +7577,7 @@ fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst } } -fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []*Inst) !Type { +fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []Air.Inst.Index) !Type { if (instructions.len == 0) return Type.initTag(.noreturn); @@ -7704,7 +7713,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!*ir.Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 60e9a96275..4a9087d7f5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -18,14 +18,14 @@ pub const Word = u32; pub const ResultId = u32; pub const TypeMap = std.HashMap(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage); -pub const InstMap = std.AutoHashMap(*Inst, ResultId); +pub const InstMap = std.AutoHashMap(Air.Inst.Index, ResultId); const IncomingBlock = struct { src_label_id: ResultId, break_value_id: ResultId, }; -pub const BlockMap = std.AutoHashMap(*Inst.Block, struct { +pub const BlockMap = std.AutoHashMap(Air.Inst.Index, 
struct { label_id: ResultId, incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock), }); @@ -279,16 +279,17 @@ pub const DeclGen = struct { return self.spv.module.getTarget(); } - fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error { + fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(self.decl); self.error_msg = try Module.ErrorMsg.create(self.spv.module.gpa, src_loc, format, args); return error.AnalysisFail; } - fn resolve(self: *DeclGen, inst: *Inst) !ResultId { + fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId { if (inst.value()) |val| { - return self.genConstant(inst.src, inst.ty, val); + return self.genConstant(inst.ty, val); } return self.inst_results.get(inst).?; // Instruction does not dominate all uses! @@ -313,7 +314,7 @@ pub const DeclGen = struct { const target = self.getTarget(); // The backend will never be asked to compiler a 0-bit integer, so we won't have to handle those in this function. - std.debug.assert(bits != 0); + assert(bits != 0); // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively. // 32-bit integers are always supported (see spec, 2.16.1, Data rules). @@ -387,19 +388,19 @@ pub const DeclGen = struct { .composite_integer }; }, // As of yet, there is no vector support in the self-hosted compiler. - .Vector => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}), + .Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}), // TODO: For which types is this the case? - else => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}), + else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}), }; } /// Generate a constant representing `val`. /// TODO: Deduplication? 
- fn genConstant(self: *DeclGen, src: LazySrcLoc, ty: Type, val: Value) Error!ResultId { + fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!ResultId { const target = self.getTarget(); const code = &self.spv.binary.types_globals_constants; const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(src, ty); + const result_type_id = try self.genType(ty); if (val.isUndef()) { try writeInstruction(code, .OpUndef, &[_]Word{ result_type_id, result_id }); @@ -411,13 +412,13 @@ pub const DeclGen = struct { const int_info = ty.intInfo(target); const backing_bits = self.backingIntBits(int_info.bits) orelse { // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. - return self.fail(src, "TODO: SPIR-V backend: implement composite int constants for {}", .{ty}); + return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty}); }; // We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any // SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this // might need to be updated. - std.debug.assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64)); + assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64)); var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt(); // Mask the low bits which make up the actual integer. 
This is to make sure that negative values @@ -469,13 +470,13 @@ pub const DeclGen = struct { } }, .Void => unreachable, - else => return self.fail(src, "TODO: SPIR-V backend: constant generation of type {}", .{ty}), + else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}), } return result_id; } - fn genType(self: *DeclGen, src: LazySrcLoc, ty: Type) Error!ResultId { + fn genType(self: *DeclGen, ty: Type) Error!ResultId { // We can't use getOrPut here so we can recursively generate types. if (self.spv.types.get(ty)) |already_generated| { return already_generated; @@ -492,7 +493,7 @@ pub const DeclGen = struct { const int_info = ty.intInfo(target); const backing_bits = self.backingIntBits(int_info.bits) orelse { // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. - return self.fail(src, "TODO: SPIR-V backend: implement composite int {}", .{ty}); + return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty}); }; // TODO: If backing_bits != int_info.bits, a duplicate type might be generated here. @@ -518,7 +519,7 @@ pub const DeclGen = struct { }; if (!supported) { - return self.fail(src, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits}); + return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits}); } try writeInstruction(code, .OpTypeFloat, &[_]Word{ result_id, bits }); @@ -526,19 +527,19 @@ pub const DeclGen = struct { .Fn => { // We only support zig-calling-convention functions, no varargs. 
if (ty.fnCallingConvention() != .Unspecified) - return self.fail(src, "Unsupported calling convention for SPIR-V", .{}); + return self.fail("Unsupported calling convention for SPIR-V", .{}); if (ty.fnIsVarArgs()) - return self.fail(src, "VarArgs unsupported for SPIR-V", .{}); + return self.fail("VarArgs unsupported for SPIR-V", .{}); // In order to avoid a temporary here, first generate all the required types and then simply look them up // when generating the function type. const params = ty.fnParamLen(); var i: usize = 0; while (i < params) : (i += 1) { - _ = try self.genType(src, ty.fnParamType(i)); + _ = try self.genType(ty.fnParamType(i)); } - const return_type_id = try self.genType(src, ty.fnReturnType()); + const return_type_id = try self.genType(ty.fnReturnType()); // result id + result type id + parameter type ids. try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen())); @@ -551,7 +552,7 @@ pub const DeclGen = struct { } }, // When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType. - .Pointer => return self.fail(src, "Cannot create pointer with unkown storage class", .{}), + .Pointer => return self.fail("Cannot create pointer with unkown storage class", .{}), .Vector => { // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations // which work on them), so simply use those. @@ -561,7 +562,7 @@ pub const DeclGen = struct { // is adequate at all for this. // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems. - return self.fail(src, "TODO: SPIR-V backend: implement type Vector", .{}); + return self.fail("TODO: SPIR-V backend: implement type Vector", .{}); }, .Null, .Undefined, @@ -573,7 +574,7 @@ pub const DeclGen = struct { .BoundFn => unreachable, // this type will be deleted from the language. 
- else => |tag| return self.fail(src, "TODO: SPIR-V backend: implement type {}s", .{tag}), + else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}), } try self.spv.types.putNoClobber(ty, result_id); @@ -582,8 +583,8 @@ pub const DeclGen = struct { /// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that. /// TODO: The result of this needs to be cached. - fn genPointerType(self: *DeclGen, src: LazySrcLoc, ty: Type, storage_class: spec.StorageClass) !ResultId { - std.debug.assert(ty.zigTypeTag() == .Pointer); + fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !ResultId { + assert(ty.zigTypeTag() == .Pointer); const code = &self.spv.binary.types_globals_constants; const result_id = self.spv.allocResultId(); @@ -591,7 +592,7 @@ pub const DeclGen = struct { // TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types // if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled. // These also relates to the pointer's address space. - const child_id = try self.genType(src, ty.elemType()); + const child_id = try self.genType(ty.elemType()); try writeInstruction(code, .OpTypePointer, &[_]Word{ result_id, @enumToInt(storage_class), child_id }); @@ -602,9 +603,9 @@ pub const DeclGen = struct { const decl = self.decl; const result_id = decl.fn_link.spirv.id; - if (decl.val.castTag(.function)) |func_payload| { - std.debug.assert(decl.ty.zigTypeTag() == .Fn); - const prototype_id = try self.genType(.{ .node_offset = 0 }, decl.ty); + if (decl.val.castTag(.function)) |_| { + assert(decl.ty.zigTypeTag() == .Fn); + const prototype_id = try self.genType(decl.ty); try writeInstruction(&self.spv.binary.fn_decls, .OpFunction, &[_]Word{ self.spv.types.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype. 
result_id, @@ -631,189 +632,167 @@ pub const DeclGen = struct { try writeInstruction(&self.spv.binary.fn_decls, .OpLabel, &[_]Word{root_block_id}); self.current_block_label_id = root_block_id; - try self.genBody(func_payload.data.body); + const main_body = self.air.getMainBody(); + try self.genBody(main_body); // Append the actual code into the fn_decls section. try self.spv.binary.fn_decls.appendSlice(self.code.items); try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionEnd, &[_]Word{}); } else { - return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()}); + return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()}); } } - fn genBody(self: *DeclGen, body: ir.Body) Error!void { - for (body.instructions) |inst| { + fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void { + for (body) |inst| { try self.genInst(inst); } } - fn genInst(self: *DeclGen, inst: *Inst) !void { - const result_id = switch (inst.tag) { - .add, .addwrap => try self.genBinOp(inst.castTag(.add).?), - .sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?), - .mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?), - .div => try self.genBinOp(inst.castTag(.div).?), - .bit_and => try self.genBinOp(inst.castTag(.bit_and).?), - .bit_or => try self.genBinOp(inst.castTag(.bit_or).?), - .xor => try self.genBinOp(inst.castTag(.xor).?), - .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?), - .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?), - .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?), - .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?), - .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?), - .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?), - .bool_and => try self.genBinOp(inst.castTag(.bool_and).?), - .bool_or => try self.genBinOp(inst.castTag(.bool_or).?), - .not => try self.genUnOp(inst.castTag(.not).?), - .alloc => try self.genAlloc(inst.castTag(.alloc).?), - .arg => 
self.genArg(), - .block => (try self.genBlock(inst.castTag(.block).?)) orelse return, - .br => return try self.genBr(inst.castTag(.br).?), - .br_void => return try self.genBrVoid(inst.castTag(.br_void).?), - // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them - // throughout the IR. + fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { + const air_tags = self.air.instructions.items(.tag); + const result_id = switch (air_tags[inst]) { + // zig fmt: off + .add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), + .sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), + .mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), + .div => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), + + .bit_and => try self.genBinOpSimple(inst, .OpBitwiseAnd), + .bit_or => try self.genBinOpSimple(inst, .OpBitwiseOr), + .xor => try self.genBinOpSimple(inst, .OpBitwiseXor), + .bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd), + .bool_or => try self.genBinOpSimple(inst, .OpLogicalOr), + + .not => try self.genNot(inst), + + .cmp_eq => try self.genCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), + .cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), + .cmp_gt => try self.genCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), + .cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), + .cmp_lt => try self.genCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), + .cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), + + .arg => self.genArg(), + .alloc => try self.genAlloc(inst), + .block => (try self.genBlock(inst)) orelse return, + .load => try self.genLoad(inst), + + .br => return self.genBr(inst), .breakpoint => return, - .condbr => return try self.genCondBr(inst.castTag(.condbr).?), - .constant => unreachable, - 
.dbg_stmt => return try self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .load => try self.genLoad(inst.castTag(.load).?), - .loop => return try self.genLoop(inst.castTag(.loop).?), - .ret => return try self.genRet(inst.castTag(.ret).?), - .retvoid => return try self.genRetVoid(), - .store => return try self.genStore(inst.castTag(.store).?), - .unreach => return try self.genUnreach(), - else => return self.fail(inst.src, "TODO: SPIR-V backend: implement inst {s}", .{@tagName(inst.tag)}), + .condbr => return self.genCondBr(inst), + .constant => unreachable, + .dbg_stmt => return self.genDbgStmt(inst), + .loop => return self.genLoop(inst), + .ret => return self.genRet(inst), + .store => return self.genStore(inst), + .unreach => return self.genUnreach(), + // zig fmt: on }; try self.inst_results.putNoClobber(inst, result_id); } - fn genBinOp(self: *DeclGen, inst: *Inst.BinOp) !ResultId { - // TODO: Will lhs and rhs have the same type? - const lhs_id = try self.resolve(inst.lhs); - const rhs_id = try self.resolve(inst.rhs); + fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); + const result_id = self.spv.allocResultId(); + try writeInstruction(&self.code, opcode, &[_]Word{ + result_type_id, result_id, lhs_id, rhs_id, + }); + return result_id; + } + + fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + // LHS and RHS are guaranteed to have the same type, and AIR guarantees + // the result to be the same as the LHS and RHS, which matches SPIR-V. 
+ const ty = self.air.getType(inst); + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - // TODO: Is the result the same as the argument types? - // This is supposed to be the case for SPIR-V. - std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty)); - std.debug.assert(inst.base.ty.tag() == .bool or inst.base.ty.eql(inst.lhs.ty)); - - // Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float - // versions of operations require different opcodes. - // For operations which produce bools, the information of inst.base.ty is not useful, so just pick either operand - // instead. - const info = try self.arithmeticTypeInfo(inst.lhs.ty); - - if (info.class == .composite_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{}); - } else if (info.class == .strange_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for strange integers", .{}); - } + const result_type_id = try self.genType(ty); + + assert(self.air.getType(bin_op.lhs).eql(ty)); + assert(self.air.getType(bin_op.rhs).eql(ty)); - const is_float = info.class == .float; - const is_signed = info.signedness == .signed; - // **Note**: All these operations must be valid for vectors as well! - const opcode = switch (inst.base.tag) { - // The regular integer operations are all defined for wrapping. Since theyre only relevant for integers, - // we can just switch on both cases here. - .add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd, - .sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub, - .mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul, - // TODO: Trap if divisor is 0? 
- // TODO: Figure out of OpSDiv for unsigned/OpUDiv for signed does anything useful. - // => Those are probably for divTrunc and divFloor, though the compiler does not yet generate those. - // => TODO: Figure out how those work on the SPIR-V side. - // => TODO: Test these. - .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv, - // Only integer versions for these. - .bit_and => Opcode.OpBitwiseAnd, - .bit_or => Opcode.OpBitwiseOr, - .xor => Opcode.OpBitwiseXor, - // Bool -> bool operations. - .bool_and => Opcode.OpLogicalAnd, - .bool_or => Opcode.OpLogicalOr, + // Binary operations are generally applicable to both scalar and vector operations + // in SPIR-V, but int and float versions of operations require different opcodes. + const info = try self.arithmeticTypeInfo(ty); + + const opcode_index: usize = switch (info.class) { + .composite_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{}); + }, + .strange_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{}); + }, + .integer => switch (info.signedness) { + .signed => 1, + .unsigned => 2, + }, + .float => 0, else => unreachable, }; - + const opcode = ops[opcode_index]; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id }); // TODO: Trap on overflow? Probably going to be annoying. // TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap. 
- if (info.class != .strange_integer) - return result_id; - - return self.fail(inst.base.src, "TODO: SPIR-V backend: strange integer operation mask", .{}); + return result_id; } - fn genCmp(self: *DeclGen, inst: *Inst.BinOp) !ResultId { - const lhs_id = try self.resolve(inst.lhs); - const rhs_id = try self.resolve(inst.rhs); - + fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - // All of these operations should be 2 equal types -> bool - std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty)); - std.debug.assert(inst.base.ty.tag() == .bool); - - // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, but int and float - // versions of operations require different opcodes. - // Since inst.base.ty is always bool and so not very useful, and because both arguments must be the same, just get the info - // from either of the operands. - const info = try self.arithmeticTypeInfo(inst.lhs.ty); - - if (info.class == .composite_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{}); - } else if (info.class == .strange_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: comparison for strange integers", .{}); - } + const result_type_id = try self.genType(Type.initTag(.bool)); + const op_ty = self.air.getType(bin_op.lhs); + assert(op_ty.eql(self.air.getType(bin_op.rhs))); - const is_bool = info.class == .bool; - const is_float = info.class == .float; - const is_signed = info.signedness == .signed; - - // **Note**: All these operations must be valid for vectors as well! - // For floating points, we generally want ordered operations (which return false if either operand is nan). 
- const opcode = switch (inst.base.tag) { - .cmp_eq => if (is_float) Opcode.OpFOrdEqual else if (is_bool) Opcode.OpLogicalEqual else Opcode.OpIEqual, - .cmp_neq => if (is_float) Opcode.OpFOrdNotEqual else if (is_bool) Opcode.OpLogicalNotEqual else Opcode.OpINotEqual, - // TODO: Verify that these OpFOrd type operations produce the right value. - // TODO: Is there a more fundamental difference between OpU and OpS operations here than just the type? - .cmp_gt => if (is_float) Opcode.OpFOrdGreaterThan else if (is_signed) Opcode.OpSGreaterThan else Opcode.OpUGreaterThan, - .cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual, - .cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan, - .cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual, + // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, + // but int and float versions of operations require different opcodes. 
+ const info = try self.arithmeticTypeInfo(op_ty); + + const opcode_index: usize = switch (info.class) { + .composite_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{}); + }, + .strange_integer => { + return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{}); + }, + .float => 0, + .bool => 1, + .integer => switch (info.signedness) { + .signed => 1, + .unsigned => 2, + }, else => unreachable, }; + const opcode = ops[opcode_index]; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id }); return result_id; } - fn genUnOp(self: *DeclGen, inst: *Inst.UnOp) !ResultId { - const operand_id = try self.resolve(inst.operand); - + fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_id = try self.resolve(ty_op.operand); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - const opcode = switch (inst.base.tag) { - // Bool -> bool - .not => Opcode.OpLogicalNot, - else => unreachable, - }; - + const result_type_id = try self.genType(Type.initTag(.bool)); + const opcode: Opcode = .OpLogicalNot; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, operand_id }); - return result_id; } - fn genAlloc(self: *DeclGen, inst: *Inst.NoOp) !ResultId { + fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty = self.air.getType(inst); const storage_class = spec.StorageClass.Function; - const result_type_id = try self.genPointerType(inst.base.src, inst.base.ty, storage_class); + const result_type_id = try self.genPointerType(ty, storage_class); const result_id = self.spv.allocResultId(); // Rather than generating into code here, we're just going to generate directly into the fn_decls section so that @@ -828,7 +807,7 @@ pub const DeclGen = struct { return 
self.args.items[self.next_arg_index]; } - fn genBlock(self: *DeclGen, inst: *Inst.Block) !?ResultId { + fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up // the current block by first generating the code of the block, then a label, and then generate the rest of the current @@ -848,11 +827,16 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.spv.gpa); } - try self.genBody(inst.body); + const ty = self.air.getType(inst); + const inst_datas = self.air.instructions.items(.data); + const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; + + try self.genBody(body); try self.beginSPIRVBlock(label_id); // If this block didn't produce a value, simply return here. - if (!inst.base.ty.hasCodeGenBits()) + if (!ty.hasCodeGenBits()) return null; // Combine the result from the blocks using the Phi instruction. @@ -862,7 +846,7 @@ pub const DeclGen = struct { // TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types // are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws // an error for pointers. - const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(ty); _ = result_type_id; try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent... @@ -874,30 +858,26 @@ pub const DeclGen = struct { return result_id; } - fn genBr(self: *DeclGen, inst: *Inst.Br) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? 
- const target = self.blocks.get(inst.block).?; + fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void { + const br = self.air.instructions.items(.data)[inst].br; + const block = self.blocks.get(br.block_inst).?; + const operand_ty = self.air.getType(br.operand); - // TODO: For some reason, br is emitted with void parameters. - if (inst.operand.ty.hasCodeGenBits()) { - const operand_id = try self.resolve(inst.operand); + if (operand_ty.hasCodeGenBits()) { + const operand_id = try self.resolve(br.operand); // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body. - try target.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); + try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); } - try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id}); - } - - fn genBrVoid(self: *DeclGen, inst: *Inst.BrVoid) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const target = self.blocks.get(inst.block).?; - // Don't need to add this to the incoming block list, as there is no value to insert in the phi node anyway. - try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id}); + try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id}); } fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? 
- const condition_id = try self.resolve(inst.condition); + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const cond_br = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]; + const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len]; + const condition_id = try self.resolve(pl_op.operand); // These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block. const then_label_id = self.spv.allocResultId(); @@ -913,23 +893,26 @@ pub const DeclGen = struct { }); try self.beginSPIRVBlock(then_label_id); - try self.genBody(inst.then_body); + try self.genBody(then_body); try self.beginSPIRVBlock(else_label_id); - try self.genBody(inst.else_body); + try self.genBody(else_body); } - fn genDbgStmt(self: *DeclGen, inst: *Inst.DbgStmt) !void { + fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { + const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const src_fname_id = try self.spv.resolveSourceFileName(self.decl); - try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, inst.line, inst.column }); + try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column }); } - fn genLoad(self: *DeclGen, inst: *Inst.UnOp) !ResultId { - const operand_id = try self.resolve(inst.operand); + fn genLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_id = try self.resolve(ty_op.operand); + const ty = self.air.getType(inst); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(ty); const result_id = self.spv.allocResultId(); - const operands = if (inst.base.ty.isVolatilePtr()) + const operands = if (ty.isVolatilePtr()) &[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } else 
&[_]Word{ result_type_id, result_id, operand_id }; @@ -939,8 +922,9 @@ pub const DeclGen = struct { return result_id; } - fn genLoop(self: *DeclGen, inst: *Inst.Loop) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? + fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void { + const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_label_id = self.spv.allocResultId(); // Jump to the loop entry point @@ -949,27 +933,29 @@ pub const DeclGen = struct { // TODO: Look into OpLoopMerge. try self.beginSPIRVBlock(loop_label_id); - try self.genBody(inst.body); + try self.genBody(body); try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id}); } - fn genRet(self: *DeclGen, inst: *Inst.UnOp) !void { - const operand_id = try self.resolve(inst.operand); - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); - } - - fn genRetVoid(self: *DeclGen) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? 
- try writeInstruction(&self.code, .OpReturn, &[_]Word{}); + fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void { + const operand = inst_datas[inst].un_op; + const operand_ty = self.air.getType(operand); + if (operand_ty.hasCodeGenBits()) { + const operand_id = try self.resolve(operand); + try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); + } else { + try writeInstruction(&self.code, .OpReturn, &[_]Word{}); + } } - fn genStore(self: *DeclGen, inst: *Inst.BinOp) !void { - const dst_ptr_id = try self.resolve(inst.lhs); - const src_val_id = try self.resolve(inst.rhs); + fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const dst_ptr_id = try self.resolve(bin_op.lhs); + const src_val_id = try self.resolve(bin_op.rhs); + const lhs_ty = self.air.getType(bin_op.lhs); - const operands = if (inst.lhs.ty.isVolatilePtr()) + const operands = if (lhs_ty.isVolatilePtr()) &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } else &[_]Word{ dst_ptr_id, src_val_id }; @@ -978,7 +964,6 @@ pub const DeclGen = struct { } fn genUnreach(self: *DeclGen) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? 
try writeInstruction(&self.code, .OpUnreachable, &[_]Word{}); } }; -- cgit v1.2.3 From 913393fd3b986dd262a8419341dced9ad5d9620d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jul 2021 15:30:30 -0700 Subject: stage2: first pass over Module.zig for AIR memory layout --- BRANCH_TODO | 122 ++++++++++++++++ src/Air.zig | 14 +- src/AstGen.zig | 2 +- src/Module.zig | 359 ++++------------------------------------------- src/Sema.zig | 114 ++++++++++++++- src/codegen.zig | 196 ++++++++++++++------------ src/codegen/spirv.zig | 57 +++++--- src/link/SpirV.zig | 4 + src/register_manager.zig | 16 +-- 9 files changed, 429 insertions(+), 455 deletions(-) (limited to 'src/Module.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index be3959e035..585c8adf44 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -568,3 +568,125 @@ const DumpAir = struct { } } }; + +pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst { + _ = mod; + const const_inst = try arena.create(ir.Inst.Constant); + const_inst.* = .{ + .base = .{ + .tag = ir.Inst.Constant.base_tag, + .ty = typed_value.ty, + .src = src, + }, + .val = typed_value.val, + }; + return &const_inst.base; +} + +pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.type), + .val = try ty.toValue(arena), + }); +} + +pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.void), + .val = Value.initTag(.void_value), + }); +} + +pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.noreturn), + .val = Value.initTag(.unreachable_value), + }); +} + +pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = ty, + .val = Value.initTag(.undef), + }); +} + +pub 
fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.bool), + .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)], + }); +} + +pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_u64.create(arena, int), + }); +} + +pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_i64.create(arena, int), + }); +} + +pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst { + if (big_int.positive) { + if (big_int.to(u64)) |x| { + return mod.constIntUnsigned(arena, src, ty, x); + } else |err| switch (err) { + error.NegativeIntoUnsigned => unreachable, + error.TargetTooSmall => {}, // handled below + } + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_big_positive.create(arena, big_int.limbs), + }); + } else { + if (big_int.to(i64)) |x| { + return mod.constIntSigned(arena, src, ty, x); + } else |err| switch (err) { + error.NegativeIntoUnsigned => unreachable, + error.TargetTooSmall => {}, // handled below + } + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs), + }); + } +} + +pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { + const zir_module = scope.namespace(); + const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); + const loc = std.zig.findLineColumn(source, inst.src); + if (inst.tag == .constant) { + std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{ + inst.ty, + inst.castTag(.constant).?.val, + zir_module.subFilePath(), + loc.line + 1, + loc.column + 1, + }); + } else if (inst.deaths 
== 0) { + std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{ + @tagName(inst.tag), + inst.ty, + zir_module.subFilePath(), + loc.line + 1, + loc.column + 1, + }); + } else { + std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{ + @tagName(inst.tag), + inst.ty, + inst.deaths, + zir_module.subFilePath(), + loc.line + 1, + loc.column + 1, + }); + } +} + diff --git a/src/Air.zig b/src/Air.zig index 112845559d..e85f2e5c43 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -29,8 +29,11 @@ pub const Inst = struct { data: Data, pub const Tag = enum(u8) { - /// The first N instructions in Air must be one arg instruction per function parameter. - /// Uses the `ty` field. + /// The first N instructions in the main block must be one arg instruction per + /// function parameter. This makes function parameters participate in + /// liveness analysis without any special handling. + /// Uses the `ty_str` field. + /// The string is the parameter name. arg, /// Float or integer addition. For integers, wrapping is undefined behavior. /// Both operands are guaranteed to be the same type, and the result type @@ -131,6 +134,8 @@ pub const Inst = struct { /// A comptime-known value. Uses the `ty_pl` field, payload is index of /// `values` array. constant, + /// A comptime-known type. Uses the `ty` field. + const_ty, /// Notes the beginning of a source code statement and marks the line and column. /// Result type is always void. /// Uses the `dbg_stmt` field. @@ -289,6 +294,11 @@ pub const Inst = struct { // Index into a different array. payload: u32, }, + ty_str: struct { + ty: Ref, + // ZIR string table index. 
+ str: u32, + }, br: struct { block_inst: Index, operand: Ref, diff --git a/src/AstGen.zig b/src/AstGen.zig index 19906c94d3..24766aaf60 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -9821,7 +9821,7 @@ fn advanceSourceCursor(astgen: *AstGen, source: []const u8, end: usize) void { astgen.source_column = column; } -const ref_start_index = Zir.Inst.Ref.typed_value_map.len; +const ref_start_index: u32 = Zir.Inst.Ref.typed_value_map.len; fn indexToRef(inst: Zir.Inst.Index) Zir.Inst.Ref { return @intToEnum(Zir.Inst.Ref, ref_start_index + inst); diff --git a/src/Module.zig b/src/Module.zig index 6273243ee2..8971a57487 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1155,7 +1155,7 @@ pub const Scope = struct { /// This can vary during inline or comptime function calls. See `Sema.owner_decl` /// for the one that will be the same for all Block instances. src_decl: *Decl, - instructions: ArrayListUnmanaged(*ir.Inst), + instructions: ArrayListUnmanaged(Air.Inst.Index), label: ?*Label = null, inlining: ?*Inlining, /// If runtime_index is not 0 then one of these is guaranteed to be non null. @@ -1187,14 +1187,14 @@ pub const Scope = struct { }; pub const Merges = struct { - block_inst: *ir.Inst.Block, + block_inst: Air.Inst.Index, /// Separate array list from break_inst_list so that it can be passed directly /// to resolvePeerTypes. - results: ArrayListUnmanaged(*ir.Inst), + results: ArrayListUnmanaged(Air.Inst.Index), /// Keeps track of the break instructions so that the operand can be replaced /// if we need to add type coercion at the end of block analysis. /// Same indexes, capacity, length as `results`. - br_list: ArrayListUnmanaged(*ir.Inst.Br), + br_list: ArrayListUnmanaged(Air.Inst.Index), }; /// For debugging purposes. 
@@ -1230,187 +1230,6 @@ pub const Scope = struct { pub fn getFileScope(block: *Block) *Scope.File { return block.src_decl.namespace.file_scope; } - - pub fn addNoOp( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - comptime tag: ir.Inst.Tag, - ) !*ir.Inst { - const inst = try block.sema.arena.create(tag.Type()); - inst.* = .{ - .base = .{ - .tag = tag, - .ty = ty, - .src = src, - }, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addUnOp( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - tag: ir.Inst.Tag, - operand: *ir.Inst, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.UnOp); - inst.* = .{ - .base = .{ - .tag = tag, - .ty = ty, - .src = src, - }, - .operand = operand, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addBinOp( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - tag: ir.Inst.Tag, - lhs: *ir.Inst, - rhs: *ir.Inst, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.BinOp); - inst.* = .{ - .base = .{ - .tag = tag, - .ty = ty, - .src = src, - }, - .lhs = lhs, - .rhs = rhs, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addBr( - scope_block: *Scope.Block, - src: LazySrcLoc, - target_block: *ir.Inst.Block, - operand: *ir.Inst, - ) !*ir.Inst.Br { - const inst = try scope_block.sema.arena.create(ir.Inst.Br); - inst.* = .{ - .base = .{ - .tag = .br, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .operand = operand, - .block = target_block, - }; - try scope_block.instructions.append(scope_block.sema.gpa, &inst.base); - return inst; - } - - pub fn addCondBr( - block: *Scope.Block, - src: LazySrcLoc, - condition: *ir.Inst, - then_body: ir.Body, - else_body: ir.Body, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.CondBr); - inst.* = .{ - .base = .{ - .tag = .condbr, - .ty = Type.initTag(.noreturn), - .src = src, - }, - 
.condition = condition, - .then_body = then_body, - .else_body = else_body, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addCall( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - func: *ir.Inst, - args: []const *ir.Inst, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.Call); - inst.* = .{ - .base = .{ - .tag = .call, - .ty = ty, - .src = src, - }, - .func = func, - .args = args, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addSwitchBr( - block: *Scope.Block, - src: LazySrcLoc, - operand: *ir.Inst, - cases: []ir.Inst.SwitchBr.Case, - else_body: ir.Body, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.SwitchBr); - inst.* = .{ - .base = .{ - .tag = .switchbr, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .target = operand, - .cases = cases, - .else_body = else_body, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addDbgStmt(block: *Scope.Block, src: LazySrcLoc, line: u32, column: u32) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.DbgStmt); - inst.* = .{ - .base = .{ - .tag = .dbg_stmt, - .ty = Type.initTag(.void), - .src = src, - }, - .line = line, - .column = column, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addStructFieldPtr( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - struct_ptr: *ir.Inst, - field_index: u32, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.StructFieldPtr); - inst.* = .{ - .base = .{ - .tag = .struct_field_ptr, - .ty = ty, - .src = src, - }, - .struct_ptr = struct_ptr, - .field_index = field_index, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } }; }; @@ -3594,30 +3413,14 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer decl.value_arena.?.* = arena.state; 
const fn_ty = decl.ty; - const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); + const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen()); defer gpa.free(param_inst_list); - for (param_inst_list) |*param_inst, param_index| { - const param_type = fn_ty.fnParamType(param_index); - const arg_inst = try arena.allocator.create(ir.Inst.Arg); - arg_inst.* = .{ - .base = .{ - .tag = .arg, - .ty = param_type, - .src = .unneeded, - }, - .name = undefined, // Set in the semantic analysis of the arg instruction. - }; - param_inst.* = &arg_inst.base; - } - - const zir = decl.namespace.file_scope.zir; - var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = &arena.allocator, - .code = zir, + .code = decl.namespace.file_scope.zir, .owner_decl = decl, .namespace = decl.namespace, .func = func, @@ -3641,7 +3444,21 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { }; defer inner_block.instructions.deinit(gpa); - // AIR currently requires the arg parameters to be the first N instructions + // AIR requires the arg parameters to be the first N instructions. + for (param_inst_list) |*param_inst, param_index| { + const param_type = fn_ty.fnParamType(param_index); + const ty_ref = try sema.addType(param_type); + param_inst.* = @intCast(u32, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .arg, + .data = .{ + .ty_str = .{ + .ty = ty_ref, + .str = undefined, // Set in the semantic analysis of the arg instruction. + }, + }, + }); + } try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; @@ -3650,17 +3467,21 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { try sema.analyzeFnBody(&inner_block, func.zir_body_inst); // Copy the block into place and mark that as the main block. 
- sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = sema.air_extra.items.len; - try sema.air_extra.appendSlice(inner_block.instructions.items); + try sema.air_extra.ensureUnusedCapacity(gpa, inner_block.instructions.items.len + 1); + const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(u32, inner_block.instructions.items.len), + }); + sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); + sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index; func.state = .success; log.debug("set {s} to success", .{decl.name}); return Air{ .instructions = sema.air_instructions.toOwnedSlice(), - .extra = sema.air_extra.toOwnedSlice(), - .values = sema.air_values.toOwnedSlice(), - .variables = sema.air_variables.toOwnedSlice(), + .extra = sema.air_extra.toOwnedSlice(gpa), + .values = sema.air_values.toOwnedSlice(gpa), + .variables = sema.air_variables.toOwnedSlice(gpa), }; } @@ -3815,94 +3636,6 @@ pub fn analyzeExport( de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export; errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1); } -pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst { - _ = mod; - const const_inst = try arena.create(ir.Inst.Constant); - const_inst.* = .{ - .base = .{ - .tag = ir.Inst.Constant.base_tag, - .ty = typed_value.ty, - .src = src, - }, - .val = typed_value.val, - }; - return &const_inst.base; -} - -pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.type), - .val = try ty.toValue(arena), - }); -} - -pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.void), - .val = Value.initTag(.void_value), - }); -} - -pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return 
mod.constInst(arena, src, .{ - .ty = Type.initTag(.noreturn), - .val = Value.initTag(.unreachable_value), - }); -} - -pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = Value.initTag(.undef), - }); -} - -pub fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.bool), - .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)], - }); -} - -pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_u64.create(arena, int), - }); -} - -pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_i64.create(arena, int), - }); -} - -pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst { - if (big_int.positive) { - if (big_int.to(u64)) |x| { - return mod.constIntUnsigned(arena, src, ty, x); - } else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_positive.create(arena, big_int.limbs), - }); - } else { - if (big_int.to(i64)) |x| { - return mod.constIntSigned(arena, src, ty, x); - } else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs), - }); - } -} pub fn deleteAnonDecl(mod: *Module, scope: *Scope, decl: *Decl) void { const scope_decl = scope.ownerDecl().?; @@ -4438,38 +4171,6 @@ pub fn errorUnionType( }); } -pub fn dumpInst(mod: 
*Module, scope: *Scope, inst: *ir.Inst) void { - const zir_module = scope.namespace(); - const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); - const loc = std.zig.findLineColumn(source, inst.src); - if (inst.tag == .constant) { - std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{ - inst.ty, - inst.castTag(.constant).?.val, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else if (inst.deaths == 0) { - std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else { - std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - inst.deaths, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } -} - pub fn getTarget(mod: Module) Target { return mod.comp.bin_file.options.target; } diff --git a/src/Sema.zig b/src/Sema.zig index b4e10837af..d7ec01696f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -12,9 +12,9 @@ gpa: *Allocator, arena: *Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, -air_extra: ArrayListUnmanaged(u32) = .{}, -air_values: ArrayListUnmanaged(Value) = .{}, -air_variables: ArrayListUnmanaged(Module.Var) = .{}, +air_extra: std.ArrayListUnmanaged(u32) = .{}, +air_values: std.ArrayListUnmanaged(Value) = .{}, +air_variables: std.ArrayListUnmanaged(*Module.Var) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -1263,15 +1263,16 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air sema.next_arg_index += 1; // TODO check if arg_name shadows a Decl + _ = arg_name; if (block.inlining) |_| { return sema.param_inst_list[arg_index]; } - // Need to set the name of the Air.Arg instruction. 
- const air_arg = sema.param_inst_list[arg_index].castTag(.arg).?; - air_arg.name = arg_name; - return &air_arg.base; + // Set the name of the Air.Arg instruction for use by codegen debug info. + const air_arg = sema.param_inst_list[arg_index]; + sema.air.instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + return air_arg; } fn zirAllocExtended( @@ -7940,3 +7941,102 @@ fn enumFieldSrcLoc( } } else unreachable; } + +pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { + switch (ty.tag()) { + .u8 => return .u8_type, + .i8 => return .i8_type, + .u16 => return .u16_type, + .i16 => return .i16_type, + .u32 => return .u32_type, + .i32 => return .i32_type, + .u64 => return .u64_type, + .i64 => return .i64_type, + .u128 => return .u128_type, + .i128 => return .i128_type, + .usize => return .usize_type, + .isize => return .isize_type, + .c_short => return .c_short_type, + .c_ushort => return .c_ushort_type, + .c_int => return .c_int_type, + .c_uint => return .c_uint_type, + .c_long => return .c_long_type, + .c_ulong => return .c_ulong_type, + .c_longlong => return .c_longlong_type, + .c_ulonglong => return .c_ulonglong_type, + .c_longdouble => return .c_longdouble_type, + .f16 => return .f16_type, + .f32 => return .f32_type, + .f64 => return .f64_type, + .f128 => return .f128_type, + .c_void => return .c_void_type, + .bool => return .bool_type, + .void => return .void_type, + .type => return .type_type, + .anyerror => return .anyerror_type, + .comptime_int => return .comptime_int_type, + .comptime_float => return .comptime_float_type, + .noreturn => return .noreturn_type, + .@"anyframe" => return .anyframe_type, + .@"null" => return .null_type, + .@"undefined" => return .undefined_type, + .enum_literal => return .enum_literal_type, + .atomic_ordering => return .atomic_ordering_type, + .atomic_rmw_op => return .atomic_rmw_op_type, + .calling_convention => return .calling_convention_type, + .float_mode => return .float_mode_type, + .reduce_op => return 
.reduce_op_type, + .call_options => return .call_options_type, + .export_options => return .export_options_type, + .extern_options => return .extern_options_type, + .manyptr_u8 => return .manyptr_u8_type, + .manyptr_const_u8 => return .manyptr_const_u8_type, + .fn_noreturn_no_args => return .fn_noreturn_no_args_type, + .fn_void_no_args => return .fn_void_no_args_type, + .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type, + .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type, + .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, + .const_slice_u8 => return .const_slice_u8_type, + else => {}, + } + try sema.air_instructions.append(sema.gpa, .{ + .tag = .const_ty, + .data = .{ .ty = ty }, + }); + return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); +} + +const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; + +fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { + return @intToEnum(Air.Inst.Ref, ref_start_index + inst); +} + +fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { + const ref_int = @enumToInt(inst); + if (ref_int >= ref_start_index) { + return ref_int - ref_start_index; + } else { + return null; + } +} + +pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { + const fields = std.meta.fields(@TypeOf(extra)); + try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len); + return addExtraAssumeCapacity(sema, extra); +} + +pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { + const fields = std.meta.fields(@TypeOf(extra)); + const result = @intCast(u32, sema.air_extra.items.len); + inline for (fields) |field| { + sema.air_extra.appendAssumeCapacity(switch (field.field_type) { + u32 => @field(extra, field.name), + Air.Inst.Ref => @enumToInt(@field(extra, field.name)), + i32 => @bitCast(u32, @field(extra, field.name)), + else => @compileError("bad field type"), + }); + } + return result; +} diff --git a/src/codegen.zig b/src/codegen.zig index 
65e85702e5..eaf910977e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -3,6 +3,7 @@ const mem = std.mem; const math = std.math; const assert = std.debug.assert; const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; const TypedValue = @import("TypedValue.zig"); @@ -45,6 +46,71 @@ pub const DebugInfoOutput = union(enum) { none, }; +pub fn generateFunction( + bin_file: *link.File, + src_loc: Module.SrcLoc, + func: *Module.Fn, + air: Air, + liveness: Liveness, + code: *std.ArrayList(u8), + debug_output: DebugInfoOutput, +) GenerateSymbolError!Result { + switch (bin_file.options.target.cpu.arch) { + .wasm32 => unreachable, // has its own code path + .wasm64 => unreachable, // has its own code path + .arm => return Function(.arm).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .armeb => return Function(.armeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .aarch64 => return Function(.aarch64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .aarch64_be => return Function(.aarch64_be).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .aarch64_32 => return Function(.aarch64_32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.arc => return Function(.arc).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.avr => return Function(.avr).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.bpfel => return Function(.bpfel).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.bpfeb => return Function(.bpfeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.hexagon => return Function(.hexagon).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mips => return Function(.mips).generate(bin_file, src_loc, func, air, liveness, code, 
debug_output), + //.mipsel => return Function(.mipsel).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mips64 => return Function(.mips64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mips64el => return Function(.mips64el).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.msp430 => return Function(.msp430).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.powerpc => return Function(.powerpc).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.powerpc64 => return Function(.powerpc64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.powerpc64le => return Function(.powerpc64le).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.r600 => return Function(.r600).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.amdgcn => return Function(.amdgcn).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.riscv32 => return Function(.riscv32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .riscv64 => return Function(.riscv64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.sparc => return Function(.sparc).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.sparcv9 => return Function(.sparcv9).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.sparcel => return Function(.sparcel).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.s390x => return Function(.s390x).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.tce => return Function(.tce).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.tcele => return Function(.tcele).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.thumb => return Function(.thumb).generate(bin_file, src_loc, func, air, liveness, code, 
debug_output), + //.thumbeb => return Function(.thumbeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.i386 => return Function(.i386).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .x86_64 => return Function(.x86_64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.xcore => return Function(.xcore).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.nvptx => return Function(.nvptx).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.nvptx64 => return Function(.nvptx64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.le32 => return Function(.le32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.le64 => return Function(.le64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.amdil => return Function(.amdil).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.amdil64 => return Function(.amdil64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.hsail => return Function(.hsail).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.hsail64 => return Function(.hsail64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.spir => return Function(.spir).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.spir64 => return Function(.spir64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.kalimba => return Function(.kalimba).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.shave => return Function(.shave).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.lanai => return Function(.lanai).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.renderscript32 => return Function(.renderscript32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + 
//.renderscript64 => return Function(.renderscript64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.ve => return Function(.ve).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."), + } +} + pub fn generateSymbol( bin_file: *link.File, src_loc: Module.SrcLoc, @@ -57,60 +123,14 @@ pub fn generateSymbol( switch (typed_value.ty.zigTypeTag()) { .Fn => { - switch (bin_file.options.target.cpu.arch) { - .wasm32 => unreachable, // has its own code path - .wasm64 => unreachable, // has its own code path - .arm => return Function(.arm).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .armeb => return Function(.armeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .aarch64 => return Function(.aarch64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.arc => return Function(.arc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.avr => return Function(.avr).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mips => return Function(.mips).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), 
- //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mips64 => return Function(.mips64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.msp430 => return Function(.msp430).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.r600 => return Function(.r600).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.sparc => return Function(.sparc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.s390x => return Function(.s390x).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.tce => return Function(.tce).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.tcele => return Function(.tcele).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.thumb => return Function(.thumb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.thumbeb => return 
Function(.thumbeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.i386 => return Function(.i386).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.xcore => return Function(.xcore).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.le32 => return Function(.le32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.le64 => return Function(.le64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.amdil => return Function(.amdil).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.hsail => return Function(.hsail).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.spir => return Function(.spir).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.spir64 => return Function(.spir64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.shave => return Function(.shave).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.lanai => return Function(.lanai).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, 
src_loc, typed_value, code, debug_output), - //.ve => return Function(.ve).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."), - } + return Result{ + .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement generateSymbol function pointers", + .{}, + ), + }; }, .Array => { // TODO populate .debug_info for the array @@ -262,6 +282,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return struct { gpa: *Allocator, + air: *const Air, bin_file: *link.File, target: *const std.Target, mod_fn: *const Module.Fn, @@ -421,10 +442,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const Self = @This(); - fn generateSymbol( + fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, + module_fn: *Module.Fn, + air: Air, + liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) GenerateSymbolError!Result { @@ -432,8 +455,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { @panic("Attempted to compile for architecture that was disabled by build configuration"); } - const module_fn = typed_value.val.castTag(.function).?.data; - assert(module_fn.owner_decl.has_tv); const fn_type = module_fn.owner_decl.ty; @@ -447,6 +468,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var function = Self{ .gpa = bin_file.allocator, + .air = &air, + .liveness = &liveness, .target = &bin_file.options.target, .bin_file = bin_file, .mod_fn = module_fn, @@ -2131,8 +2154,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void { - const name_with_null = inst.name[0 .. 
mem.lenZ(inst.name) + 1]; - const ty = self.air.getType(inst); + const ty_str = self.air.instructions.items(.data)[inst].ty_str; + const zir = &self.mod_fn.owner_decl.namespace.file_scope.zir; + const name = zir.nullTerminatedString(ty_str.str); + const name_with_null = name.ptr[0 .. name.len + 1]; + const ty = self.air.getRefType(ty_str.ty); switch (mcv) { .register => |reg| { @@ -2249,8 +2275,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genCall(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - const pl_op = inst_datas[inst].pl_op; + const pl_op = self.air.instructions.items(.data)[inst].pl_op; const fn_ty = self.air.getType(pl_op.operand); const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, inst_data.payload); @@ -2848,8 +2873,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genCondBr(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - const pl_op = inst_datas[inst].pl_op; + const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.CondBr, inst_data.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; @@ -3101,16 +3125,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsNull(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isNull(operand); } fn genIsNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try 
self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -3126,16 +3150,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsNonNull(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isNonNull(operand); } fn genIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
@@ -3151,16 +3175,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsErr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isErr(operand); } fn genIsErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -3176,16 +3200,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsNonErr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isNonErr(operand); } fn genIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
@@ -3200,8 +3224,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genLoop(self: *Self, inst: Air.Inst.Index) !MCValue { // A loop is a setup to be able to jump back to the beginning. - const inst_datas = self.air.instructions.items(.data); - const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; const start_index = self.code.items.len; try self.genBody(body); @@ -4377,13 +4401,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genPtrToInt(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - return self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + return self.resolveInst(un_op); } fn genBitCast(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - return self.resolveInst(inst_datas[inst].ty_op.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } fn resolveInst(self: *Self, inst: Air.Inst.Index) !MCValue { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4a9087d7f5..3d704a8dc5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -159,7 +159,10 @@ pub const DeclGen = struct { /// The SPIR-V module code should be put in. spv: *SPIRVModule, - /// An array of function argument result-ids. Each index corresponds with the function argument of the same index. + air: *const Air, + + /// An array of function argument result-ids. Each index corresponds with the + /// function argument of the same index. args: std.ArrayList(ResultId), /// A counter to keep track of how many `arg` instructions we've seen yet. 
@@ -168,33 +171,35 @@ pub const DeclGen = struct { /// A map keeping track of which instruction generated which result-id. inst_results: InstMap, - /// We need to keep track of result ids for block labels, as well as the 'incoming' blocks for a block. + /// We need to keep track of result ids for block labels, as well as the 'incoming' + /// blocks for a block. blocks: BlockMap, /// The label of the SPIR-V block we are currently generating. current_block_label_id: ResultId, - /// The actual instructions for this function. We need to declare all locals in the first block, and because we don't - /// know which locals there are going to be, we're just going to generate everything after the locals-section in this array. - /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the initial OpLabel. These will be generated - /// into spv.binary.fn_decls directly. + /// The actual instructions for this function. We need to declare all locals in + /// the first block, and because we don't know which locals there are going to be, + /// we're just going to generate everything after the locals-section in this array. + /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the + /// initial OpLabel. These will be generated into spv.binary.fn_decls directly. code: std.ArrayList(Word), /// The decl we are currently generating code for. decl: *Decl, - /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message. Memory is owned by - /// `module.gpa`. + /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message. + /// Memory is owned by `module.gpa`. error_msg: ?*Module.ErrorMsg, /// Possible errors the `gen` function may return. const Error = error{ AnalysisFail, OutOfMemory }; - /// This structure is used to return information about a type typically used for arithmetic operations. - /// These types may either be integers, floats, or a vector of these. 
Most scalar operations also work on vectors, - /// so we can easily represent those as arithmetic types. - /// If the type is a scalar, 'inner type' refers to the scalar type. Otherwise, if its a vector, it refers - /// to the vector's element type. + /// This structure is used to return information about a type typically used for + /// arithmetic operations. These types may either be integers, floats, or a vector + /// of these. Most scalar operations also work on vectors, so we can easily represent + /// those as arithmetic types. If the type is a scalar, 'inner type' refers to the + /// scalar type. Otherwise, if its a vector, it refers to the vector's element type. const ArithmeticTypeInfo = struct { /// A classification of the inner type. const Class = enum { @@ -206,13 +211,14 @@ pub const DeclGen = struct { /// the relevant capability is enabled). integer, - /// A regular float. These are all required to be natively supported. Floating points for - /// which the relevant capability is not enabled are not emulated. + /// A regular float. These are all required to be natively supported. Floating points + /// for which the relevant capability is not enabled are not emulated. float, - /// An integer of a 'strange' size (which' bit size is not the same as its backing type. **Note**: this - /// may **also** include power-of-2 integers for which the relevant capability is not enabled), but still - /// within the limits of the largest natively supported integer type. + /// An integer of a 'strange' size (which' bit size is not the same as its backing + /// type. **Note**: this may **also** include power-of-2 integers for which the + /// relevant capability is not enabled), but still within the limits of the largest + /// natively supported integer type. strange_integer, /// An integer with more bits than the largest natively supported integer type. @@ -220,7 +226,7 @@ pub const DeclGen = struct { }; /// The number of bits in the inner type. 
- /// Note: this is the actual number of bits of the type, not the size of the backing integer. + /// This is the actual number of bits of the type, not the size of the backing integer. bits: u16, /// Whether the type is a vector. @@ -234,10 +240,12 @@ pub const DeclGen = struct { class: Class, }; - /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, only set when `gen` is called. + /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, + /// only set when `gen` is called. pub fn init(spv: *SPIRVModule) DeclGen { return .{ .spv = spv, + .air = undefined, .args = std.ArrayList(ResultId).init(spv.gpa), .next_arg_index = undefined, .inst_results = InstMap.init(spv.gpa), @@ -252,8 +260,9 @@ pub const DeclGen = struct { /// Generate the code for `decl`. If a reportable error occured during code generation, /// a message is returned by this function. Callee owns the memory. If this function returns such /// a reportable error, it is valid to be called again for a different decl. - pub fn gen(self: *DeclGen, decl: *Decl) !?*Module.ErrorMsg { + pub fn gen(self: *DeclGen, decl: *Decl, air: Air) !?*Module.ErrorMsg { // Reset internal resources, we don't want to re-allocate these. 
+ self.air = &air; self.args.items.len = 0; self.next_arg_index = 0; self.inst_results.clearRetainingCapacity(); @@ -680,7 +689,7 @@ pub const DeclGen = struct { .br => return self.genBr(inst), .breakpoint => return, - .condbr => return self.genCondBr(inst), + .cond_br => return self.genCondBr(inst), .constant => unreachable, .dbg_stmt => return self.genDbgStmt(inst), .loop => return self.genLoop(inst), @@ -688,6 +697,10 @@ pub const DeclGen = struct { .store => return self.genStore(inst), .unreach => return self.genUnreach(), // zig fmt: on + + else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{ + @tagName(tag), + }), }; try self.inst_results.putNoClobber(inst, result_id); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index bfae799462..8a2e877d42 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -135,6 +135,10 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { const tracy = trace(@src()); defer tracy.end(); + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + const module = self.base.options.module.?; const target = comp.getTarget(); diff --git a/src/register_manager.zig b/src/register_manager.zig index 8aca7fcc3d..f0d128e7f9 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -20,7 +20,7 @@ pub fn RegisterManager( ) type { return struct { /// The key must be canonical register. 
- registers: [callee_preserved_regs.len]?*ir.Inst = [_]?*ir.Inst{null} ** callee_preserved_regs.len, + registers: [callee_preserved_regs.len]?Air.Inst.Index = [_]?Air.Inst.Index{null} ** callee_preserved_regs.len, free_registers: FreeRegInt = math.maxInt(FreeRegInt), /// Tracks all registers allocated in the course of this function allocated_registers: FreeRegInt = 0, @@ -75,7 +75,7 @@ pub fn RegisterManager( pub fn tryAllocRegs( self: *Self, comptime count: comptime_int, - insts: [count]?*ir.Inst, + insts: [count]?Air.Inst.Index, exceptions: []const Register, ) ?[count]Register { comptime if (callee_preserved_regs.len == 0) return null; @@ -113,7 +113,7 @@ pub fn RegisterManager( /// Allocates a register and optionally tracks it with a /// corresponding instruction. Returns `null` if all registers /// are allocated. - pub fn tryAllocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) ?Register { + pub fn tryAllocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) ?Register { return if (tryAllocRegs(self, 1, .{inst}, exceptions)) |regs| regs[0] else null; } @@ -123,7 +123,7 @@ pub fn RegisterManager( pub fn allocRegs( self: *Self, comptime count: comptime_int, - insts: [count]?*ir.Inst, + insts: [count]?Air.Inst.Index, exceptions: []const Register, ) ![count]Register { comptime assert(count > 0 and count <= callee_preserved_regs.len); @@ -168,14 +168,14 @@ pub fn RegisterManager( /// Allocates a register and optionally tracks it with a /// corresponding instruction. - pub fn allocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) !Register { + pub fn allocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) !Register { return (try self.allocRegs(1, .{inst}, exceptions))[0]; } /// Spills the register if it is currently allocated. If a /// corresponding instruction is passed, will also track this /// register. 
- pub fn getReg(self: *Self, reg: Register, inst: ?*ir.Inst) !void { + pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) !void { const index = reg.allocIndex() orelse return; if (inst) |tracked_inst| @@ -202,7 +202,7 @@ pub fn RegisterManager( /// Allocates the specified register with the specified /// instruction. Asserts that the register is free and no /// spilling is necessary. - pub fn getRegAssumeFree(self: *Self, reg: Register, inst: *ir.Inst) void { + pub fn getRegAssumeFree(self: *Self, reg: Register, inst: Air.Inst.Index) void { const index = reg.allocIndex() orelse return; assert(self.registers[index] == null); @@ -264,7 +264,7 @@ fn MockFunction(comptime Register: type) type { self.spilled.deinit(self.allocator); } - pub fn spillInstruction(self: *Self, reg: Register, inst: *ir.Inst) !void { + pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { _ = inst; try self.spilled.append(self.allocator, reg); } -- cgit v1.2.3 From 0f38f686964664f68e013ec3c63cfe655001f165 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jul 2021 19:51:31 -0700 Subject: stage2: Air and Liveness are passed ephemerally to the link infrastructure, instead of being stored with Module.Fn. This moves towards a strategy to make more efficient use of memory by not storing Air or Liveness data in the Fn struct, but computing it on demand, immediately sending it to the backend, and then immediately freeing it. Backends which want to defer codegen until flush() such as SPIR-V must move the Air/Liveness data upon `updateFunc` being called and keep track of that data in the backend implementation itself. 
--- BRANCH_TODO | 5 + src/Compilation.zig | 2 +- src/Liveness.zig | 9 +- src/Module.zig | 5 - src/Sema.zig | 762 +++++++++++++++++++++++++------------------------- src/codegen.zig | 7 +- src/codegen/c.zig | 9 +- src/codegen/llvm.zig | 3 + src/codegen/spirv.zig | 3 +- src/codegen/wasm.zig | 88 +++--- src/link.zig | 34 ++- src/link/C.zig | 28 +- src/link/Coff.zig | 56 +++- src/link/Elf.zig | 558 +++++++++++++++++++----------------- src/link/MachO.zig | 55 ++++ src/link/Plan9.zig | 29 +- src/link/SpirV.zig | 24 +- src/link/Wasm.zig | 59 +++- 18 files changed, 1023 insertions(+), 713 deletions(-) (limited to 'src/Module.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index 585c8adf44..c7f3923559 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -690,3 +690,8 @@ pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { } } + /// For debugging purposes. + pub fn dump(func: *Fn, mod: Module) void { + ir.dumpFn(mod, func); + } + diff --git a/src/Compilation.zig b/src/Compilation.zig index 74ad7b2aae..90224a77d1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2027,7 +2027,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { - func.dump(module.*); + @panic("TODO implement dumping AIR and liveness"); } assert(decl.ty.hasCodeGenBits()); diff --git a/src/Liveness.zig b/src/Liveness.zig index 0cbac61118..1402a5997b 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -50,7 +50,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { var a: Analysis = .{ .gpa = gpa, - .air = &air, + .air = air, .table = .{}, .tomb_bits = try gpa.alloc( usize, @@ -65,7 +65,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { defer a.table.deinit(gpa); const main_body = air.getMainBody(); - try a.table.ensureTotalCapacity(main_body.len); + try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len)); try 
analyzeWithContext(&a, null, main_body); return Liveness{ .tomb_bits = a.tomb_bits, @@ -108,9 +108,10 @@ const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. const Analysis = struct { gpa: *Allocator, - air: *const Air, + air: Air, table: std.AutoHashMapUnmanaged(Air.Inst.Index, void), tomb_bits: []usize, + special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { @@ -165,7 +166,7 @@ fn analyzeWithContext( fn analyzeInst( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, ) Allocator.Error!void { const gpa = a.gpa; diff --git a/src/Module.zig b/src/Module.zig index 8971a57487..5972c2bdcf 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -769,11 +769,6 @@ pub const Fn = struct { success, }; - /// For debugging purposes. - pub fn dump(func: *Fn, mod: Module) void { - ir.dumpFn(mod, func); - } - pub fn deinit(func: *Fn, gpa: *Allocator) void { if (func.getInferredErrorSet()) |map| { map.deinit(gpa); diff --git a/src/Sema.zig b/src/Sema.zig index d7ec01696f..54c42a482d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -69,7 +69,7 @@ const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); const target_util = @import("target.zig"); -pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index); +pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref); pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; @@ -158,344 +158,344 @@ pub fn analyzeBody( var i: usize = 0; while (true) { const inst = body[i]; - const air_inst = switch (tags[inst]) { + const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .arg => try sema.zirArg(block, inst), - .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, 
inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), - .alloc_mut => try sema.zirAllocMut(block, inst), - .alloc_comptime => try sema.zirAllocComptime(block, inst), - .anyframe_type => try sema.zirAnyframeType(block, inst), - .array_cat => try sema.zirArrayCat(block, inst), - .array_mul => try sema.zirArrayMul(block, inst), - .array_type => try sema.zirArrayType(block, inst), - .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), - .vector_type => try sema.zirVectorType(block, inst), - .as => try sema.zirAs(block, inst), - .as_node => try sema.zirAsNode(block, inst), - .bit_and => try sema.zirBitwise(block, inst, .bit_and), - .bit_not => try sema.zirBitNot(block, inst), - .bit_or => try sema.zirBitwise(block, inst, .bit_or), - .bitcast => try sema.zirBitcast(block, inst), - .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), - .block => try sema.zirBlock(block, inst), - .suspend_block => try sema.zirSuspendBlock(block, inst), - .bool_not => try sema.zirBoolNot(block, inst), - .bool_and => try sema.zirBoolOp(block, inst, false), - .bool_or => try sema.zirBoolOp(block, inst, true), - .bool_br_and => try sema.zirBoolBr(block, inst, false), - .bool_br_or => try sema.zirBoolBr(block, inst, true), - .c_import => try sema.zirCImport(block, inst), - .call => try sema.zirCall(block, inst, .auto, false), - .call_chkused => try sema.zirCall(block, inst, .auto, true), - .call_compile_time => try sema.zirCall(block, inst, .compile_time, false), - .call_nosuspend => try sema.zirCall(block, inst, .no_async, false), - .call_async => try sema.zirCall(block, inst, .async_kw, false), - .cmp_eq => try sema.zirCmp(block, inst, .eq), - .cmp_gt => try sema.zirCmp(block, inst, .gt), - .cmp_gte => try sema.zirCmp(block, inst, .gte), - .cmp_lt => try sema.zirCmp(block, inst, .lt), - .cmp_lte => 
try sema.zirCmp(block, inst, .lte), - .cmp_neq => try sema.zirCmp(block, inst, .neq), - .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), - .decl_ref => try sema.zirDeclRef(block, inst), - .decl_val => try sema.zirDeclVal(block, inst), - .load => try sema.zirLoad(block, inst), - .elem_ptr => try sema.zirElemPtr(block, inst), - .elem_ptr_node => try sema.zirElemPtrNode(block, inst), - .elem_val => try sema.zirElemVal(block, inst), - .elem_val_node => try sema.zirElemValNode(block, inst), - .elem_type => try sema.zirElemType(block, inst), - .enum_literal => try sema.zirEnumLiteral(block, inst), - .enum_to_int => try sema.zirEnumToInt(block, inst), - .int_to_enum => try sema.zirIntToEnum(block, inst), - .err_union_code => try sema.zirErrUnionCode(block, inst), - .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), - .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), - .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), - .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), - .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), - .error_union_type => try sema.zirErrorUnionType(block, inst), - .error_value => try sema.zirErrorValue(block, inst), - .error_to_int => try sema.zirErrorToInt(block, inst), - .int_to_error => try sema.zirIntToError(block, inst), - .field_ptr => try sema.zirFieldPtr(block, inst), - .field_ptr_named => try sema.zirFieldPtrNamed(block, inst), - .field_val => try sema.zirFieldVal(block, inst), - .field_val_named => try sema.zirFieldValNamed(block, inst), - .func => try sema.zirFunc(block, inst, false), - .func_inferred => try sema.zirFunc(block, inst, true), - .import => try sema.zirImport(block, inst), - .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), - .int => try sema.zirInt(block, inst), - .int_big => try sema.zirIntBig(block, inst), - .float => try sema.zirFloat(block, inst), - .float128 => 
try sema.zirFloat128(block, inst), - .int_type => try sema.zirIntType(block, inst), - .is_non_err => try sema.zirIsNonErr(block, inst), - .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), - .is_non_null => try sema.zirIsNonNull(block, inst), - .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), - .loop => try sema.zirLoop(block, inst), - .merge_error_sets => try sema.zirMergeErrorSets(block, inst), - .negate => try sema.zirNegate(block, inst, .sub), - .negate_wrap => try sema.zirNegate(block, inst, .subwrap), - .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), - .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), - .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), - .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), - .optional_type => try sema.zirOptionalType(block, inst), - .param_type => try sema.zirParamType(block, inst), - .ptr_type => try sema.zirPtrType(block, inst), - .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), - .ref => try sema.zirRef(block, inst), - .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), - .shl => try sema.zirShl(block, inst), - .shr => try sema.zirShr(block, inst), - .slice_end => try sema.zirSliceEnd(block, inst), - .slice_sentinel => try sema.zirSliceSentinel(block, inst), - .slice_start => try sema.zirSliceStart(block, inst), - .str => try sema.zirStr(block, inst), - .switch_block => try sema.zirSwitchBlock(block, inst, false, .none), - .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), - .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), - .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), - .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), - .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), - .switch_block_ref => try 
sema.zirSwitchBlock(block, inst, true, .none), - .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), - .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), - .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), - .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), - .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), - .switch_capture => try sema.zirSwitchCapture(block, inst, false, false), - .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), - .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), - .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), - .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), - .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), - .type_info => try sema.zirTypeInfo(block, inst), - .size_of => try sema.zirSizeOf(block, inst), - .bit_size_of => try sema.zirBitSizeOf(block, inst), - .typeof => try sema.zirTypeof(block, inst), - .typeof_elem => try sema.zirTypeofElem(block, inst), - .log2_int_type => try sema.zirLog2IntType(block, inst), - .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), - .xor => try sema.zirBitwise(block, inst, .xor), - .struct_init_empty => try sema.zirStructInitEmpty(block, inst), - .struct_init => try sema.zirStructInit(block, inst, false), - .struct_init_ref => try sema.zirStructInit(block, inst, true), - .struct_init_anon => try sema.zirStructInitAnon(block, inst, false), - .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), - .array_init => try sema.zirArrayInit(block, inst, false), - .array_init_ref => try sema.zirArrayInit(block, inst, true), - .array_init_anon => try sema.zirArrayInitAnon(block, inst, false), - .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), - 
.union_init_ptr => try sema.zirUnionInitPtr(block, inst), - .field_type => try sema.zirFieldType(block, inst), - .field_type_ref => try sema.zirFieldTypeRef(block, inst), - .ptr_to_int => try sema.zirPtrToInt(block, inst), - .align_of => try sema.zirAlignOf(block, inst), - .bool_to_int => try sema.zirBoolToInt(block, inst), - .embed_file => try sema.zirEmbedFile(block, inst), - .error_name => try sema.zirErrorName(block, inst), - .tag_name => try sema.zirTagName(block, inst), - .reify => try sema.zirReify(block, inst), - .type_name => try sema.zirTypeName(block, inst), - .frame_type => try sema.zirFrameType(block, inst), - .frame_size => try sema.zirFrameSize(block, inst), - .float_to_int => try sema.zirFloatToInt(block, inst), - .int_to_float => try sema.zirIntToFloat(block, inst), - .int_to_ptr => try sema.zirIntToPtr(block, inst), - .float_cast => try sema.zirFloatCast(block, inst), - .int_cast => try sema.zirIntCast(block, inst), - .err_set_cast => try sema.zirErrSetCast(block, inst), - .ptr_cast => try sema.zirPtrCast(block, inst), - .truncate => try sema.zirTruncate(block, inst), - .align_cast => try sema.zirAlignCast(block, inst), - .has_decl => try sema.zirHasDecl(block, inst), - .has_field => try sema.zirHasField(block, inst), - .clz => try sema.zirClz(block, inst), - .ctz => try sema.zirCtz(block, inst), - .pop_count => try sema.zirPopCount(block, inst), - .byte_swap => try sema.zirByteSwap(block, inst), - .bit_reverse => try sema.zirBitReverse(block, inst), - .div_exact => try sema.zirDivExact(block, inst), - .div_floor => try sema.zirDivFloor(block, inst), - .div_trunc => try sema.zirDivTrunc(block, inst), - .mod => try sema.zirMod(block, inst), - .rem => try sema.zirRem(block, inst), - .shl_exact => try sema.zirShlExact(block, inst), - .shr_exact => try sema.zirShrExact(block, inst), - .bit_offset_of => try sema.zirBitOffsetOf(block, inst), - .offset_of => try sema.zirOffsetOf(block, inst), - .cmpxchg_strong => try sema.zirCmpxchg(block, inst), - 
.cmpxchg_weak => try sema.zirCmpxchg(block, inst), - .splat => try sema.zirSplat(block, inst), - .reduce => try sema.zirReduce(block, inst), - .shuffle => try sema.zirShuffle(block, inst), - .atomic_load => try sema.zirAtomicLoad(block, inst), - .atomic_rmw => try sema.zirAtomicRmw(block, inst), - .atomic_store => try sema.zirAtomicStore(block, inst), - .mul_add => try sema.zirMulAdd(block, inst), - .builtin_call => try sema.zirBuiltinCall(block, inst), - .field_ptr_type => try sema.zirFieldPtrType(block, inst), - .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), - .memcpy => try sema.zirMemcpy(block, inst), - .memset => try sema.zirMemset(block, inst), - .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), - .@"resume" => try sema.zirResume(block, inst), - .@"await" => try sema.zirAwait(block, inst, false), - .await_nosuspend => try sema.zirAwait(block, inst, true), - .extended => try sema.zirExtended(block, inst), - - .sqrt => try sema.zirUnaryMath(block, inst), - .sin => try sema.zirUnaryMath(block, inst), - .cos => try sema.zirUnaryMath(block, inst), - .exp => try sema.zirUnaryMath(block, inst), - .exp2 => try sema.zirUnaryMath(block, inst), - .log => try sema.zirUnaryMath(block, inst), - .log2 => try sema.zirUnaryMath(block, inst), - .log10 => try sema.zirUnaryMath(block, inst), - .fabs => try sema.zirUnaryMath(block, inst), - .floor => try sema.zirUnaryMath(block, inst), - .ceil => try sema.zirUnaryMath(block, inst), - .trunc => try sema.zirUnaryMath(block, inst), - .round => try sema.zirUnaryMath(block, inst), - - .opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), - .opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), - .opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), - .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), - .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), - .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), - - .add => try 
sema.zirArithmetic(block, inst), - .addwrap => try sema.zirArithmetic(block, inst), - .div => try sema.zirArithmetic(block, inst), - .mod_rem => try sema.zirArithmetic(block, inst), - .mul => try sema.zirArithmetic(block, inst), - .mulwrap => try sema.zirArithmetic(block, inst), - .sub => try sema.zirArithmetic(block, inst), - .subwrap => try sema.zirArithmetic(block, inst), - - // Instructions that we know to *always* be noreturn based solely on their tag. - // These functions match the return type of analyzeBody so that we can - // tail call them here. - .break_inline => return inst, - .condbr => return sema.zirCondbr(block, inst), - .@"break" => return sema.zirBreak(block, inst), - .compile_error => return sema.zirCompileError(block, inst), - .ret_coerce => return sema.zirRetCoerce(block, inst, true), - .ret_node => return sema.zirRetNode(block, inst), - .ret_err_value => return sema.zirRetErrValue(block, inst), - .@"unreachable" => return sema.zirUnreachable(block, inst), - .repeat => return sema.zirRepeat(block, inst), - .panic => return sema.zirPanic(block, inst), - // zig fmt: on - - // Instructions that we know can *never* be noreturn based solely on - // their tag. We avoid needlessly checking if they are noreturn and - // continue the loop. - // We also know that they cannot be referenced later, so we avoid - // putting them into the map. 
- .breakpoint => { - try sema.zirBreakpoint(block, inst); - i += 1; - continue; - }, - .fence => { - try sema.zirFence(block, inst); - i += 1; - continue; - }, - .dbg_stmt => { - try sema.zirDbgStmt(block, inst); - i += 1; - continue; - }, - .ensure_err_payload_void => { - try sema.zirEnsureErrPayloadVoid(block, inst); - i += 1; - continue; - }, - .ensure_result_non_error => { - try sema.zirEnsureResultNonError(block, inst); - i += 1; - continue; - }, - .ensure_result_used => { - try sema.zirEnsureResultUsed(block, inst); - i += 1; - continue; - }, - .set_eval_branch_quota => { - try sema.zirSetEvalBranchQuota(block, inst); - i += 1; - continue; - }, - .store => { - try sema.zirStore(block, inst); - i += 1; - continue; - }, - .store_node => { - try sema.zirStoreNode(block, inst); - i += 1; - continue; - }, - .store_to_block_ptr => { - try sema.zirStoreToBlockPtr(block, inst); - i += 1; - continue; - }, - .store_to_inferred_ptr => { - try sema.zirStoreToInferredPtr(block, inst); - i += 1; - continue; - }, - .resolve_inferred_alloc => { - try sema.zirResolveInferredAlloc(block, inst); - i += 1; - continue; - }, - .validate_struct_init_ptr => { - try sema.zirValidateStructInitPtr(block, inst); - i += 1; - continue; - }, - .validate_array_init_ptr => { - try sema.zirValidateArrayInitPtr(block, inst); - i += 1; - continue; - }, - .@"export" => { - try sema.zirExport(block, inst); - i += 1; - continue; - }, - .set_align_stack => { - try sema.zirSetAlignStack(block, inst); - i += 1; - continue; - }, - .set_cold => { - try sema.zirSetCold(block, inst); - i += 1; - continue; - }, - .set_float_mode => { - try sema.zirSetFloatMode(block, inst); - i += 1; - continue; - }, - .set_runtime_safety => { - try sema.zirSetRuntimeSafety(block, inst); - i += 1; - continue; - }, + //.alloc => try sema.zirAlloc(block, inst), + //.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), + //.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, 
Type.initTag(.inferred_alloc_mut)), + //.alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), + //.alloc_mut => try sema.zirAllocMut(block, inst), + //.alloc_comptime => try sema.zirAllocComptime(block, inst), + //.anyframe_type => try sema.zirAnyframeType(block, inst), + //.array_cat => try sema.zirArrayCat(block, inst), + //.array_mul => try sema.zirArrayMul(block, inst), + //.array_type => try sema.zirArrayType(block, inst), + //.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), + //.vector_type => try sema.zirVectorType(block, inst), + //.as => try sema.zirAs(block, inst), + //.as_node => try sema.zirAsNode(block, inst), + //.bit_and => try sema.zirBitwise(block, inst, .bit_and), + //.bit_not => try sema.zirBitNot(block, inst), + //.bit_or => try sema.zirBitwise(block, inst, .bit_or), + //.bitcast => try sema.zirBitcast(block, inst), + //.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), + //.block => try sema.zirBlock(block, inst), + //.suspend_block => try sema.zirSuspendBlock(block, inst), + //.bool_not => try sema.zirBoolNot(block, inst), + //.bool_and => try sema.zirBoolOp(block, inst, false), + //.bool_or => try sema.zirBoolOp(block, inst, true), + //.bool_br_and => try sema.zirBoolBr(block, inst, false), + //.bool_br_or => try sema.zirBoolBr(block, inst, true), + //.c_import => try sema.zirCImport(block, inst), + //.call => try sema.zirCall(block, inst, .auto, false), + //.call_chkused => try sema.zirCall(block, inst, .auto, true), + //.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), + //.call_nosuspend => try sema.zirCall(block, inst, .no_async, false), + //.call_async => try sema.zirCall(block, inst, .async_kw, false), + //.cmp_eq => try sema.zirCmp(block, inst, .eq), + //.cmp_gt => try sema.zirCmp(block, inst, .gt), + //.cmp_gte => try sema.zirCmp(block, inst, .gte), + //.cmp_lt => try sema.zirCmp(block, inst, .lt), + //.cmp_lte => try sema.zirCmp(block, inst, .lte), + 
//.cmp_neq => try sema.zirCmp(block, inst, .neq), + //.coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), + //.decl_ref => try sema.zirDeclRef(block, inst), + //.decl_val => try sema.zirDeclVal(block, inst), + //.load => try sema.zirLoad(block, inst), + //.elem_ptr => try sema.zirElemPtr(block, inst), + //.elem_ptr_node => try sema.zirElemPtrNode(block, inst), + //.elem_val => try sema.zirElemVal(block, inst), + //.elem_val_node => try sema.zirElemValNode(block, inst), + //.elem_type => try sema.zirElemType(block, inst), + //.enum_literal => try sema.zirEnumLiteral(block, inst), + //.enum_to_int => try sema.zirEnumToInt(block, inst), + //.int_to_enum => try sema.zirIntToEnum(block, inst), + //.err_union_code => try sema.zirErrUnionCode(block, inst), + //.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), + //.err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), + //.err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), + //.err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), + //.err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), + //.error_union_type => try sema.zirErrorUnionType(block, inst), + //.error_value => try sema.zirErrorValue(block, inst), + //.error_to_int => try sema.zirErrorToInt(block, inst), + //.int_to_error => try sema.zirIntToError(block, inst), + //.field_ptr => try sema.zirFieldPtr(block, inst), + //.field_ptr_named => try sema.zirFieldPtrNamed(block, inst), + //.field_val => try sema.zirFieldVal(block, inst), + //.field_val_named => try sema.zirFieldValNamed(block, inst), + //.func => try sema.zirFunc(block, inst, false), + //.func_inferred => try sema.zirFunc(block, inst, true), + //.import => try sema.zirImport(block, inst), + //.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), + //.int => try sema.zirInt(block, inst), + //.int_big => try sema.zirIntBig(block, inst), + //.float => try 
sema.zirFloat(block, inst), + //.float128 => try sema.zirFloat128(block, inst), + //.int_type => try sema.zirIntType(block, inst), + //.is_non_err => try sema.zirIsNonErr(block, inst), + //.is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), + //.is_non_null => try sema.zirIsNonNull(block, inst), + //.is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), + //.loop => try sema.zirLoop(block, inst), + //.merge_error_sets => try sema.zirMergeErrorSets(block, inst), + //.negate => try sema.zirNegate(block, inst, .sub), + //.negate_wrap => try sema.zirNegate(block, inst, .subwrap), + //.optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), + //.optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), + //.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), + //.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), + //.optional_type => try sema.zirOptionalType(block, inst), + //.param_type => try sema.zirParamType(block, inst), + //.ptr_type => try sema.zirPtrType(block, inst), + //.ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), + //.ref => try sema.zirRef(block, inst), + //.ret_err_value_code => try sema.zirRetErrValueCode(block, inst), + //.shl => try sema.zirShl(block, inst), + //.shr => try sema.zirShr(block, inst), + //.slice_end => try sema.zirSliceEnd(block, inst), + //.slice_sentinel => try sema.zirSliceSentinel(block, inst), + //.slice_start => try sema.zirSliceStart(block, inst), + //.str => try sema.zirStr(block, inst), + //.switch_block => try sema.zirSwitchBlock(block, inst, false, .none), + //.switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), + //.switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), + //.switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), + //.switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), + 
//.switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), + //.switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), + //.switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), + //.switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), + //.switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), + //.switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), + //.switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), + //.switch_capture => try sema.zirSwitchCapture(block, inst, false, false), + //.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), + //.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), + //.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), + //.switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), + //.switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), + //.type_info => try sema.zirTypeInfo(block, inst), + //.size_of => try sema.zirSizeOf(block, inst), + //.bit_size_of => try sema.zirBitSizeOf(block, inst), + //.typeof => try sema.zirTypeof(block, inst), + //.typeof_elem => try sema.zirTypeofElem(block, inst), + //.log2_int_type => try sema.zirLog2IntType(block, inst), + //.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), + //.xor => try sema.zirBitwise(block, inst, .xor), + //.struct_init_empty => try sema.zirStructInitEmpty(block, inst), + //.struct_init => try sema.zirStructInit(block, inst, false), + //.struct_init_ref => try sema.zirStructInit(block, inst, true), + //.struct_init_anon => try sema.zirStructInitAnon(block, inst, false), + //.struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), + //.array_init => try sema.zirArrayInit(block, inst, false), + //.array_init_ref => try 
sema.zirArrayInit(block, inst, true), + //.array_init_anon => try sema.zirArrayInitAnon(block, inst, false), + //.array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), + //.union_init_ptr => try sema.zirUnionInitPtr(block, inst), + //.field_type => try sema.zirFieldType(block, inst), + //.field_type_ref => try sema.zirFieldTypeRef(block, inst), + //.ptr_to_int => try sema.zirPtrToInt(block, inst), + //.align_of => try sema.zirAlignOf(block, inst), + //.bool_to_int => try sema.zirBoolToInt(block, inst), + //.embed_file => try sema.zirEmbedFile(block, inst), + //.error_name => try sema.zirErrorName(block, inst), + //.tag_name => try sema.zirTagName(block, inst), + //.reify => try sema.zirReify(block, inst), + //.type_name => try sema.zirTypeName(block, inst), + //.frame_type => try sema.zirFrameType(block, inst), + //.frame_size => try sema.zirFrameSize(block, inst), + //.float_to_int => try sema.zirFloatToInt(block, inst), + //.int_to_float => try sema.zirIntToFloat(block, inst), + //.int_to_ptr => try sema.zirIntToPtr(block, inst), + //.float_cast => try sema.zirFloatCast(block, inst), + //.int_cast => try sema.zirIntCast(block, inst), + //.err_set_cast => try sema.zirErrSetCast(block, inst), + //.ptr_cast => try sema.zirPtrCast(block, inst), + //.truncate => try sema.zirTruncate(block, inst), + //.align_cast => try sema.zirAlignCast(block, inst), + //.has_decl => try sema.zirHasDecl(block, inst), + //.has_field => try sema.zirHasField(block, inst), + //.clz => try sema.zirClz(block, inst), + //.ctz => try sema.zirCtz(block, inst), + //.pop_count => try sema.zirPopCount(block, inst), + //.byte_swap => try sema.zirByteSwap(block, inst), + //.bit_reverse => try sema.zirBitReverse(block, inst), + //.div_exact => try sema.zirDivExact(block, inst), + //.div_floor => try sema.zirDivFloor(block, inst), + //.div_trunc => try sema.zirDivTrunc(block, inst), + //.mod => try sema.zirMod(block, inst), + //.rem => try sema.zirRem(block, inst), + //.shl_exact => 
try sema.zirShlExact(block, inst), + //.shr_exact => try sema.zirShrExact(block, inst), + //.bit_offset_of => try sema.zirBitOffsetOf(block, inst), + //.offset_of => try sema.zirOffsetOf(block, inst), + //.cmpxchg_strong => try sema.zirCmpxchg(block, inst), + //.cmpxchg_weak => try sema.zirCmpxchg(block, inst), + //.splat => try sema.zirSplat(block, inst), + //.reduce => try sema.zirReduce(block, inst), + //.shuffle => try sema.zirShuffle(block, inst), + //.atomic_load => try sema.zirAtomicLoad(block, inst), + //.atomic_rmw => try sema.zirAtomicRmw(block, inst), + //.atomic_store => try sema.zirAtomicStore(block, inst), + //.mul_add => try sema.zirMulAdd(block, inst), + //.builtin_call => try sema.zirBuiltinCall(block, inst), + //.field_ptr_type => try sema.zirFieldPtrType(block, inst), + //.field_parent_ptr => try sema.zirFieldParentPtr(block, inst), + //.memcpy => try sema.zirMemcpy(block, inst), + //.memset => try sema.zirMemset(block, inst), + //.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), + //.@"resume" => try sema.zirResume(block, inst), + //.@"await" => try sema.zirAwait(block, inst, false), + //.await_nosuspend => try sema.zirAwait(block, inst, true), + //.extended => try sema.zirExtended(block, inst), + + //.sqrt => try sema.zirUnaryMath(block, inst), + //.sin => try sema.zirUnaryMath(block, inst), + //.cos => try sema.zirUnaryMath(block, inst), + //.exp => try sema.zirUnaryMath(block, inst), + //.exp2 => try sema.zirUnaryMath(block, inst), + //.log => try sema.zirUnaryMath(block, inst), + //.log2 => try sema.zirUnaryMath(block, inst), + //.log10 => try sema.zirUnaryMath(block, inst), + //.fabs => try sema.zirUnaryMath(block, inst), + //.floor => try sema.zirUnaryMath(block, inst), + //.ceil => try sema.zirUnaryMath(block, inst), + //.trunc => try sema.zirUnaryMath(block, inst), + //.round => try sema.zirUnaryMath(block, inst), + + //.opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), + //.opaque_decl_anon => try 
sema.zirOpaqueDecl(block, inst, .anon), + //.opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), + //.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), + //.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), + //.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), + + //.add => try sema.zirArithmetic(block, inst), + //.addwrap => try sema.zirArithmetic(block, inst), + //.div => try sema.zirArithmetic(block, inst), + //.mod_rem => try sema.zirArithmetic(block, inst), + //.mul => try sema.zirArithmetic(block, inst), + //.mulwrap => try sema.zirArithmetic(block, inst), + //.sub => try sema.zirArithmetic(block, inst), + //.subwrap => try sema.zirArithmetic(block, inst), + + //// Instructions that we know to *always* be noreturn based solely on their tag. + //// These functions match the return type of analyzeBody so that we can + //// tail call them here. + //.break_inline => return inst, + //.condbr => return sema.zirCondbr(block, inst), + //.@"break" => return sema.zirBreak(block, inst), + //.compile_error => return sema.zirCompileError(block, inst), + //.ret_coerce => return sema.zirRetCoerce(block, inst, true), + //.ret_node => return sema.zirRetNode(block, inst), + //.ret_err_value => return sema.zirRetErrValue(block, inst), + //.@"unreachable" => return sema.zirUnreachable(block, inst), + //.repeat => return sema.zirRepeat(block, inst), + //.panic => return sema.zirPanic(block, inst), + //// zig fmt: on + + //// Instructions that we know can *never* be noreturn based solely on + //// their tag. We avoid needlessly checking if they are noreturn and + //// continue the loop. + //// We also know that they cannot be referenced later, so we avoid + //// putting them into the map. 
+ //.breakpoint => { + // try sema.zirBreakpoint(block, inst); + // i += 1; + // continue; + //}, + //.fence => { + // try sema.zirFence(block, inst); + // i += 1; + // continue; + //}, + //.dbg_stmt => { + // try sema.zirDbgStmt(block, inst); + // i += 1; + // continue; + //}, + //.ensure_err_payload_void => { + // try sema.zirEnsureErrPayloadVoid(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_non_error => { + // try sema.zirEnsureResultNonError(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_used => { + // try sema.zirEnsureResultUsed(block, inst); + // i += 1; + // continue; + //}, + //.set_eval_branch_quota => { + // try sema.zirSetEvalBranchQuota(block, inst); + // i += 1; + // continue; + //}, + //.store => { + // try sema.zirStore(block, inst); + // i += 1; + // continue; + //}, + //.store_node => { + // try sema.zirStoreNode(block, inst); + // i += 1; + // continue; + //}, + //.store_to_block_ptr => { + // try sema.zirStoreToBlockPtr(block, inst); + // i += 1; + // continue; + //}, + //.store_to_inferred_ptr => { + // try sema.zirStoreToInferredPtr(block, inst); + // i += 1; + // continue; + //}, + //.resolve_inferred_alloc => { + // try sema.zirResolveInferredAlloc(block, inst); + // i += 1; + // continue; + //}, + //.validate_struct_init_ptr => { + // try sema.zirValidateStructInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.validate_array_init_ptr => { + // try sema.zirValidateArrayInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.@"export" => { + // try sema.zirExport(block, inst); + // i += 1; + // continue; + //}, + //.set_align_stack => { + // try sema.zirSetAlignStack(block, inst); + // i += 1; + // continue; + //}, + //.set_cold => { + // try sema.zirSetCold(block, inst); + // i += 1; + // continue; + //}, + //.set_float_mode => { + // try sema.zirSetFloatMode(block, inst); + // i += 1; + // continue; + //}, + //.set_runtime_safety => { + // try sema.zirSetRuntimeSafety(block, inst); 
+ // i += 1; + // continue; + //}, // Special case instructions to handle comptime control flow. .repeat_inline => { @@ -505,37 +505,38 @@ pub fn analyzeBody( i = 0; continue; }, - .block_inline => blk: { - // Directly analyze the block body without introducing a new block. - const inst_data = datas[inst].pl_node; - const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); - const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, - .condbr_inline => blk: { - const inst_data = datas[inst].pl_node; - const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; - const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); - const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; - const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); - const inline_body = if (cond.val.toBool()) then_body else else_body; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, + //.block_inline => blk: { + // // Directly analyze the block body without introducing a new block. 
+ // const inst_data = datas[inst].pl_node; + // const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); + // const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + //.condbr_inline => blk: { + // const inst_data = datas[inst].pl_node; + // const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; + // const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); + // const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; + // const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + // const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); + // const inline_body = if (cond.val.toBool()) then_body else else_body; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + else => @panic("TODO remove else prong"), }; - if (air_inst.ty.isNoReturn()) + if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; try map.put(sema.gpa, inst, air_inst); i += 1; @@ -577,18 +578,13 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -/// TODO when we rework AIR memory layout, this function will no longer have a possible error. -pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. 
if (i < Zir.Inst.Ref.typed_value_map.len) { - // TODO when we rework AIR memory layout, this function can be as simple as: - // if (zir_ref < Zir.const_inst_list.len + sema.param_count) - // return zir_ref; - // Until then we allocate memory for a new, mutable `ir.Inst` to match what - // AIR expects. - return sema.mod.constInst(sema.arena, .unneeded, Zir.Inst.Ref.typed_value_map[i]); + // We intentionally map the same indexes to the same values between ZIR and AIR. + return zir_ref; } i -= Zir.Inst.Ref.typed_value_map.len; @@ -1256,7 +1252,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1271,7 +1267,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. 
const air_arg = sema.param_inst_list[arg_index]; - sema.air.instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; return air_arg; } @@ -7942,6 +7938,18 @@ fn enumFieldSrcLoc( } else unreachable; } +fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = sema.air_instructions.items(.tag); + const air_datas = sema.air_instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { switch (ty.tag()) { .u8 => return .u8_type, diff --git a/src/codegen.zig b/src/codegen.zig index eaf910977e..a6c4b5ad3c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -282,7 +282,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return struct { gpa: *Allocator, - air: *const Air, + air: Air, + liveness: Liveness, bin_file: *link.File, target: *const std.Target, mod_fn: *const Module.Fn, @@ -468,8 +469,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var function = Self{ .gpa = bin_file.allocator, - .air = &air, - .liveness = &liveness, + .air = air, + .liveness = liveness, .target = &bin_file.options.target, .bin_file = bin_file, .mod_fn = module_fn, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e3f2423746..4743494f35 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6,7 +6,6 @@ const log = std.log.scoped(.c); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const Air = @import("../Air.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; const TypedValue = @import("../TypedValue.zig"); @@ -14,6 +13,8 @@ const C = link.File.C; 
const Decl = Module.Decl; const trace = @import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const Mutability = enum { Const, Mut }; @@ -37,7 +38,7 @@ const BlockData = struct { result: CValue, }; -pub const CValueMap = std.AutoHashMap(*Inst, CValue); +pub const CValueMap = std.AutoHashMap(Air.Inst.Index, CValue); pub const TypedefMap = std.ArrayHashMap( Type, struct { name: []const u8, rendered: []u8 }, @@ -93,6 +94,8 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { /// It is not available when generating .h file. pub const Object = struct { dg: DeclGen, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, @@ -102,7 +105,7 @@ pub const Object = struct { next_block_index: usize = 0, indent_writer: IndentWriter(std.ArrayList(u8).Writer), - fn resolveInst(o: *Object, inst: *Inst) !CValue { + fn resolveInst(o: *Object, inst: Air.Inst.Index) !CValue { if (inst.value()) |_| { return CValue{ .constant = inst }; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 45ee2d9bb8..ddf2883259 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -277,6 +277,9 @@ pub const Object = struct { } pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + var dg: DeclGen = .{ .object = self, .module = module, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 3d704a8dc5..4da320b087 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -159,7 +159,8 @@ pub const DeclGen = struct { /// The SPIR-V module code should be put in. spv: *SPIRVModule, - air: *const Air, + air: Air, + liveness: Liveness, /// An array of function argument result-ids. Each index corresponds with the /// function argument of the same index. 
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 45b00ddfad..912577a358 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -9,13 +9,14 @@ const wasm = std.wasm; const Module = @import("../Module.zig"); const Decl = Module.Decl; -const Air = @import("../Air.zig"); const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const Compilation = @import("../Compilation.zig"); const LazySrcLoc = Module.LazySrcLoc; const link = @import("../link.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -491,6 +492,8 @@ pub const Context = struct { /// Reference to the function declaration the code /// section belongs to decl: *Decl, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, /// Table to save `WValue`'s generated by an `Inst` values: ValueTable, @@ -710,52 +713,53 @@ pub const Context = struct { } } + pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { + try self.genFunctype(); + + // Write instructions + // TODO: check for and handle death of instructions + + // Reserve space to write the size after generating the code as well as space for locals count + try self.code.resize(10); + + try self.genBody(func.body); + + // finally, write our local types at the 'offset' position + { + leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); + + // offset into 'code' section where we will put our locals types + var local_offset: usize = 10; + + // emit the actual locals amount + for (self.locals.items) |local| { + var buf: [6]u8 = undefined; + leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); + buf[5] = local; + try self.code.insertSlice(local_offset, &buf); + local_offset += 6; + } + } + + const writer = self.code.writer(); + try writer.writeByte(wasm.opcode(.end)); + + // Fill in the size of the 
generated code to the reserved space at the + // beginning of the buffer. + const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; + leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); + + // codegen data has been appended to `code` + return Result.appended; + } + /// Generates the wasm bytecode for the function declaration belonging to `Context` pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result { switch (typed_value.ty.zigTypeTag()) { .Fn => { try self.genFunctype(); - - // Write instructions - // TODO: check for and handle death of instructions - const mod_fn = blk: { - if (typed_value.val.castTag(.function)) |func| break :blk func.data; - if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions - unreachable; - }; - - // Reserve space to write the size after generating the code as well as space for locals count - try self.code.resize(10); - - try self.genBody(mod_fn.body); - - // finally, write our local types at the 'offset' position - { - leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); - - // offset into 'code' section where we will put our locals types - var local_offset: usize = 10; - - // emit the actual locals amount - for (self.locals.items) |local| { - var buf: [6]u8 = undefined; - leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); - buf[5] = local; - try self.code.insertSlice(local_offset, &buf); - local_offset += 6; - } - } - - const writer = self.code.writer(); - try writer.writeByte(wasm.opcode(.end)); - - // Fill in the size of the generated code to the reserved space at the - // beginning of the buffer. 
- const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; - leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); - - // codegen data has been appended to `code` - return Result.appended; + if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions + return self.fail("TODO implement wasm codegen for function pointers", .{}); }, .Array => { if (typed_value.val.castTag(.bytes)) |payload| { diff --git a/src/link.zig b/src/link.zig index 02d9afaf07..2403180ec8 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const fs = std.fs; @@ -14,8 +15,10 @@ const Cache = @import("Cache.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const wasi_libc = @import("wasi_libc.zig"); +const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); -pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version; +pub const producer_string = if (builtin.is_test) "zig test" else "zig " ++ build_options.version; pub const Emit = struct { /// Where the output will go. 
@@ -313,13 +316,34 @@ pub const File = struct { log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty }); assert(decl.has_tv); switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), - .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), + .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl), - .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), + .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), .spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl), .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl), + // zig fmt: on + } + } + + /// May be called before or after updateDeclExports but must be called + /// after allocateDeclIndexes for any given Decl. 
+ pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + log.debug("updateFunc {*} ({s}), type={}", .{ + func.owner_decl, func.owner_decl.name, func.owner_decl.ty, + }); + switch (base.tag) { + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness), + .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness), + .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness), + .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness), + // zig fmt: on } } diff --git a/src/link/C.zig b/src/link/C.zig index 53561d16cd..09f789f7d1 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -2,14 +2,17 @@ const std = @import("std"); const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const fs = std.fs; + +const C = @This(); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const fs = std.fs; const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; -const C = @This(); const Type = @import("../type.zig").Type; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag: link.File.Tag = .c; pub const zig_h = @embedFile("C/zig.h"); @@ -95,10 +98,7 @@ fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void { decl.fn_link.c.typedefs.deinit(gpa); } -pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - +pub fn finishUpdateDecl(self: *C, module: 
*Module, decl: *Module.Decl, air: Air, liveness: Liveness) !void { // Keep track of all decls so we can iterate over them on flush(). _ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -126,6 +126,8 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { .code = code.toManaged(module.gpa), .value_map = codegen.CValueMap.init(module.gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code + .air = air, + .liveness = liveness, }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { @@ -157,6 +159,20 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { code.shrinkAndFree(module.gpa, code.items.len); } +pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + const tracy = trace(@src()); + defer tracy.end(); + + return self.finishUpdateDecl(module, func.owner_decl, air, liveness); +} + +pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + + return self.finishUpdateDecl(module, decl, undefined, undefined); +} + pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void { // The C backend does not have the ability to fix line numbers without re-generating // the entire Decl. 
diff --git a/src/link/Coff.zig b/src/link/Coff.zig index b466cf9136..44442b73a3 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1,6 +1,7 @@ const Coff = @This(); const std = @import("std"); +const builtin = @import("builtin"); const log = std.log.scoped(.link); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -17,6 +18,8 @@ const build_options = @import("build_options"); const Cache = @import("../Cache.zig"); const mingw = @import("../mingw.zig"); const llvm_backend = @import("../codegen/llvm.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const allocation_padding = 4 / 3; const minimum_text_block_size = 64 * allocation_padding; @@ -653,19 +656,58 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { } } -pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { - // TODO COFF/PE debug information - // TODO Implement exports +pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } const tracy = trace(@src()); defer tracy.end(); - if (build_options.have_llvm) - if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl); + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + const res = try codegen.generateFunction( + &self.base, + decl.srcLoc(), + func, + air, + liveness, + &code_buffer, + .none, + ); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + 
}; + + return self.finishUpdateDecl(module, func.owner_decl, code); +} + +pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + const tracy = trace(@src()); + defer tracy.end(); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? } + // TODO COFF/PE debug information + // TODO Implement exports + var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -683,6 +725,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { }, }; + return self.finishUpdateDecl(module, func.owner_decl, code); +} + +fn finishUpdateDecl(self: *Coff, decl: *Module.Decl, code: []const u8) !void { const required_alignment = decl.ty.abiAlignment(self.base.options.target); const curr_size = decl.link.coff.size; if (curr_size != 0) { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 90224866ba..0d05b97846 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1,6 +1,7 @@ const Elf = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; @@ -10,7 +11,6 @@ const log = std.log.scoped(.link); const DW = std.dwarf; const leb128 = std.leb; -const Air = @import("../Air.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const codegen = @import("../codegen.zig"); @@ -26,6 +26,8 @@ const glibc = @import("../glibc.zig"); const musl = @import("../musl.zig"); const Cache = @import("../Cache.zig"); const llvm_backend = @import("../codegen/llvm.zig"); +const Air = @import("../Air.zig"); 
+const Liveness = @import("../Liveness.zig"); const default_entry_addr = 0x8000000; @@ -2155,138 +2157,17 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void { } } -pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - - if (build_options.have_llvm) - if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl); - - if (decl.val.tag() == .extern_fn) { - return; // TODO Should we do more when front-end analyzed extern decl? - } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; - if (variable.is_extern) { - return; // TODO Should we do more when front-end analyzed extern decl? - } - } - - var code_buffer = std.ArrayList(u8).init(self.base.allocator); - defer code_buffer.deinit(); - - var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); - defer dbg_line_buffer.deinit(); - - var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); - defer dbg_info_buffer.deinit(); - - var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; - defer { - var it = dbg_info_type_relocs.valueIterator(); - while (it.next()) |value| { - value.relocs.deinit(self.base.allocator); - } - dbg_info_type_relocs.deinit(self.base.allocator); - } - - const is_fn: bool = switch (decl.ty.zigTypeTag()) { - .Fn => true, - else => false, - }; - if (is_fn) { - // For functions we need to add a prologue to the debug line program. - try dbg_line_buffer.ensureCapacity(26); - - const func = decl.val.castTag(.function).?.data; - const line_off = @intCast(u28, decl.src_line + func.lbrace_line); - - const ptr_width_bytes = self.ptrWidthBytes(); - dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ - DW.LNS_extended_op, - ptr_width_bytes + 1, - DW.LNE_set_address, - }); - // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`. 
- assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); - dbg_line_buffer.items.len += ptr_width_bytes; - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); - // This is the "relocatable" relative line offset from the previous function's end curly - // to this function's begin curly. - assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); - // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); - assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); - // Once we support more than one source file, this will have the ability to be more - // than one possible value. - const file_index = 1; - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); - - // Emit a line for the begin curly with prologue_end=false. The codegen will - // do the work of setting prologue_end=true and epilogue_begin=true. - dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); - - // .debug_info subprogram - const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; - try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); - - const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); - if (fn_ret_has_bits) { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); - } else { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); - } - // These get overwritten after generating the machine code. These values are - // "relocations" and have to be in this fixed place so that functions can be - // moved in virtual address space. 
- assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr - assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 - if (fn_ret_has_bits) { - const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); - if (!gop.found_existing) { - gop.value_ptr.* = .{ - .off = undefined, - .relocs = .{}, - }; - } - try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); - dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 - } - dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string - } else { - // TODO implement .debug_info for global variables +fn deinitRelocs(gpa: *Allocator, table: *File.DbgInfoTypeRelocsTable) void { + var it = table.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(gpa); } - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ - .ty = decl.ty, - .val = decl_val, - }, &code_buffer, .{ - .dwarf = .{ - .dbg_line = &dbg_line_buffer, - .dbg_info = &dbg_info_buffer, - .dbg_info_type_relocs = &dbg_info_type_relocs, - }, - }); - const code = switch (res) { - .externally_managed => |x| x, - .appended => code_buffer.items, - .fail => |em| { - decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); - return; - }, - }; + table.deinit(gpa); +} +fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym { const required_alignment = decl.ty.abiAlignment(self.base.options.target); - const stt_bits: u8 = if (is_fn) elf.STT_FUNC else elf.STT_OBJECT; - assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes() const local_sym = 
&self.local_symbols.items[decl.link.elf.local_sym_index]; if (local_sym.st_size != 0) { @@ -2338,128 +2219,16 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - const target_endian = self.base.options.target.cpu.arch.endian(); - - const text_block = &decl.link.elf; - - // If the Decl is a function, we need to update the .debug_line program. - if (is_fn) { - // Perform the relocations based on vaddr. - switch (self.ptr_width) { - .p32 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - }, - .p64 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - }, - } - { - const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); - } - - try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); - - // Now we have the full contents and may allocate a region to store it. - - // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for - // `TextBlock` and the .debug_info. If you are editing this logic, you - // probably need to edit that logic too. 
- - const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; - const src_fn = &decl.fn_link.elf; - src_fn.len = @intCast(u32, dbg_line_buffer.items.len); - if (self.dbg_line_fn_last) |last| not_first: { - if (src_fn.next) |next| { - // Update existing function - non-last item. - if (src_fn.off + src_fn.len + min_nop_size > next.off) { - // It grew too big, so we move it to a new location. - if (src_fn.prev) |prev| { - self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; - prev.next = src_fn.next; - } - assert(src_fn.prev != next); - next.prev = src_fn.prev; - src_fn.next = null; - // Populate where it used to be with NOPs. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); - // TODO Look at the free list before appending at the end. - src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else if (src_fn.prev == null) { - if (src_fn == last) { - // Special case: there is only 1 function and it is being updated. - // In this case there is nothing to do. The function's length has - // already been updated, and the logic below takes care of - // resizing the .debug_line section. - break :not_first; - } - // Append new function. - // TODO Look at the free list before appending at the end. - src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else { - // This is the first function of the Line Number Program. 
- self.dbg_line_fn_first = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); - } - - const last_src_fn = self.dbg_line_fn_last.?; - const needed_size = last_src_fn.off + last_src_fn.len; - if (needed_size != debug_line_sect.sh_size) { - if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { - const new_offset = self.findFreeSpace(needed_size, 1); - const existing_size = last_src_fn.off; - log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ - existing_size, - debug_line_sect.sh_offset, - new_offset, - }); - const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); - if (amt != existing_size) return error.InputOutput; - debug_line_sect.sh_offset = new_offset; - } - debug_line_sect.sh_size = needed_size; - self.shdr_table_dirty = true; // TODO look into making only the one section dirty - self.debug_line_header_dirty = true; - } - const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; - const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; - - // We only have support for one compilation unit so far, so the offsets are directly - // from the .debug_line section. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); - - // .debug_info - End the TAG_subprogram children. - try dbg_info_buffer.append(0); - } + return local_sym; +} +fn finishUpdateDecl( + self: *Elf, + module: *Module, + decl: *Module.Decl, + dbg_info_type_relocs: *File.DbgInfoTypeRelocsTable, + dbg_info_buffer: *std.ArrayList(u8), +) !void { // Now we emit the .debug_info types of the Decl. These will count towards the size of // the buffer, so we have to do it before computing the offset, and we can't perform the actual // relocations yet. 
@@ -2467,12 +2236,15 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var it = dbg_info_type_relocs.iterator(); while (it.next()) |entry| { entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len); - try self.addDbgInfoType(entry.key_ptr.*, &dbg_info_buffer); + try self.addDbgInfoType(entry.key_ptr.*, dbg_info_buffer); } } + const text_block = &decl.link.elf; try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len)); + const target_endian = self.base.options.target.cpu.arch.endian(); + { // Now that we have the offset assigned we can finally perform type relocations. var it = dbg_info_type_relocs.valueIterator(); @@ -2495,6 +2267,290 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { return self.updateDeclExports(module, decl, decl_exports); } +pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + + const tracy = trace(@src()); + defer tracy.end(); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // For functions we need to add a prologue to the debug line program. 
+ try dbg_line_buffer.ensureCapacity(26); + + const decl = func.owner_decl; + const line_off = @intCast(u28, decl.src_line + func.lbrace_line); + + const ptr_width_bytes = self.ptrWidthBytes(); + dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ + DW.LNS_extended_op, + ptr_width_bytes + 1, + DW.LNE_set_address, + }); + // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`. + assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); + dbg_line_buffer.items.len += ptr_width_bytes; + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); + // This is the "relocatable" relative line offset from the previous function's end curly + // to this function's begin curly. + assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); + // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); + assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); + // Once we support more than one source file, this will have the ability to be more + // than one possible value. + const file_index = 1; + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); + + // Emit a line for the begin curly with prologue_end=false. The codegen will + // do the work of setting prologue_end=true and epilogue_begin=true. + dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); + + // .debug_info subprogram + const decl_name_with_null = decl.name[0 .. 
mem.lenZ(decl.name) + 1]; + try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); + + const fn_ret_type = decl.ty.fnReturnType(); + const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); + if (fn_ret_has_bits) { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); + } else { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); + } + // These get overwritten after generating the machine code. These values are + // "relocations" and have to be in this fixed place so that functions can be + // moved in virtual address space. + assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr + assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 + if (fn_ret_has_bits) { + const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); + if (!gop.found_existing) { + gop.value_ptr.* = .{ + .off = undefined, + .relocs = .{}, + }; + } + try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); + dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 + } + dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string + + const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + const local_sym = try self.updateDeclCode(decl, code, elf.STT_FUNC); + + const target_endian = self.base.options.target.cpu.arch.endian(); + + // Since the Decl is a function, we 
need to update the .debug_line program. + // Perform the relocations based on vaddr. + switch (self.ptr_width) { + .p32 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + }, + .p64 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + }, + } + { + const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); + } + + try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); + + // Now we have the full contents and may allocate a region to store it. + + // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for + // `TextBlock` and the .debug_info. If you are editing this logic, you + // probably need to edit that logic too. + + const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; + const src_fn = &decl.fn_link.elf; + src_fn.len = @intCast(u32, dbg_line_buffer.items.len); + if (self.dbg_line_fn_last) |last| not_first: { + if (src_fn.next) |next| { + // Update existing function - non-last item. + if (src_fn.off + src_fn.len + min_nop_size > next.off) { + // It grew too big, so we move it to a new location. + if (src_fn.prev) |prev| { + self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; + prev.next = src_fn.next; + } + assert(src_fn.prev != next); + next.prev = src_fn.prev; + src_fn.next = null; + // Populate where it used to be with NOPs. 
+ const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else if (src_fn.prev == null) { + if (src_fn == last) { + // Special case: there is only 1 function and it is being updated. + // In this case there is nothing to do. The function's length has + // already been updated, and the logic below takes care of + // resizing the .debug_line section. + break :not_first; + } + // Append new function. + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else { + // This is the first function of the Line Number Program. + self.dbg_line_fn_first = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); + } + + const last_src_fn = self.dbg_line_fn_last.?; + const needed_size = last_src_fn.off + last_src_fn.len; + if (needed_size != debug_line_sect.sh_size) { + if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { + const new_offset = self.findFreeSpace(needed_size, 1); + const existing_size = last_src_fn.off; + log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ + existing_size, + debug_line_sect.sh_offset, + new_offset, + }); + const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); + if (amt != existing_size) return error.InputOutput; + debug_line_sect.sh_offset = new_offset; + } + debug_line_sect.sh_size = needed_size; + self.shdr_table_dirty = true; // TODO look into making only the one section dirty + self.debug_line_header_dirty = true; + } + const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + 
prev.len) else 0; + const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; + + // We only have support for one compilation unit so far, so the offsets are directly + // from the .debug_line section. + const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); + + // .debug_info - End the TAG_subprogram children. + try dbg_info_buffer.append(0); + + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + +pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + + const tracy = trace(@src()); + defer tracy.end(); + + if (decl.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + if (decl.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.is_extern) { + return; // TODO Should we do more when front-end analyzed extern decl? 
+ } + } + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // TODO implement .debug_info for global variables + const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + .ty = decl.ty, + .val = decl_val, + }, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + _ = try self.updateDeclCode(decl, code, elf.STT_OBJECT); + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + /// Asserts the type has codegen bits. 
fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !void { switch (ty.zigTypeTag()) { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index df2e0134e4..cd020c1b27 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1,6 +1,7 @@ const MachO = @This(); const std = @import("std"); +const builtin = @import("builtin"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const fmt = std.fmt; @@ -22,6 +23,8 @@ const link = @import("../link.zig"); const File = link.File; const Cache = @import("../Cache.zig"); const target_util = @import("../target.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const DebugSymbols = @import("MachO/DebugSymbols.zig"); const Trie = @import("MachO/Trie.zig"); @@ -1132,7 +1135,55 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void { }; } +pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const tracy = trace(@src()); + defer tracy.end(); + + const decl = func.owner_decl; + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + defer { + if (debug_buffers) |*dbg| { + dbg.dbg_line_buffer.deinit(); + dbg.dbg_info_buffer.deinit(); + var it = dbg.dbg_info_type_relocs.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(self.base.allocator); + } + dbg.dbg_info_type_relocs.deinit(self.base.allocator); + } + } + + const res = if (debug_buffers) |*dbg| + try codegen.generateFunction(&self.base, decl.srcLoc(), 
func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg.dbg_line_buffer, + .dbg_info = &dbg.dbg_info_buffer, + .dbg_info_type_relocs = &dbg.dbg_info_type_relocs, + }, + }) + else + try codegen.generateSymbol(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + + return self.finishUpdateDecl(module, decl, res); +} + pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } const tracy = trace(@src()); defer tracy.end(); @@ -1173,6 +1224,10 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { .val = decl.val, }, &code_buffer, .none); + return self.finishUpdateDecl(module, decl, res); +} + +fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void { const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 80a92f9cdb..bc044ce414 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -2,18 +2,21 @@ //! would be to add incremental linking in a similar way as ELF does. 
const Plan9 = @This(); - -const std = @import("std"); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const aout = @import("Plan9/aout.zig"); const codegen = @import("../codegen.zig"); const trace = @import("../tracy.zig").trace; -const mem = std.mem; const File = link.File; -const Allocator = std.mem.Allocator; +const build_options = @import("build_options"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); +const std = @import("std"); +const builtin = @import("builtin"); +const mem = std.mem; +const Allocator = std.mem.Allocator; const log = std.log.scoped(.link); const assert = std.debug.assert; @@ -120,6 +123,19 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 { return self; } +pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). 
+ _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO Plan9 needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { _ = module; _ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -138,6 +154,9 @@ pub fn flush(self: *Plan9, comp: *Compilation) !void { } pub fn flushModule(self: *Plan9, comp: *Compilation) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } _ = comp; const tracy = trace(@src()); defer tracy.end(); @@ -199,7 +218,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { } } if (std.mem.eql(u8, exp.options.name, "_start")) { - std.debug.assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry + assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry self.entry_decl = decl; } if (exp.link.plan9) |i| { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 8a2e877d42..bc9e560582 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -36,6 +36,8 @@ const ResultId = codegen.ResultId; const trace = @import("../tracy.zig").trace; const build_options = @import("build_options"); const spec = @import("../codegen/spirv/spec.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); // TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl? 
pub const FnData = struct { @@ -101,7 +103,23 @@ pub fn deinit(self: *SpirV) void { self.decl_table.deinit(self.base.allocator); } +pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). + _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO SPIR-V needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } _ = module; // Keep track of all decls so we can iterate over them on flush(). _ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -132,13 +150,13 @@ pub fn flush(self: *SpirV, comp: *Compilation) !void { } pub fn flushModule(self: *SpirV, comp: *Compilation) !void { - const tracy = trace(@src()); - defer tracy.end(); - if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const tracy = trace(@src()); + defer tracy.end(); + const module = self.base.options.module.?; const target = comp.getTarget(); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 15a36a4bcc..be6ad78701 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1,6 +1,7 @@ const Wasm = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -18,6 +19,8 @@ const build_options = @import("build_options"); const wasi_libc = @import("../wasi_libc.zig"); const Cache = @import("../Cache.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air 
= @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag = link.File.Tag.wasm; @@ -186,11 +189,60 @@ pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void { } } +pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const decl = func.owner_decl; + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + + const fn_data = &decl.fn_link.wasm; + fn_data.functype.items.len = 0; + fn_data.code.items.len = 0; + fn_data.idx_refs.items.len = 0; + + var context = codegen.Context{ + .gpa = self.base.allocator, + .air = air, + .liveness = liveness, + .values = .{}, + .code = fn_data.code.toManaged(self.base.allocator), + .func_type_data = fn_data.functype.toManaged(self.base.allocator), + .decl = decl, + .err_msg = undefined, + .locals = .{}, + .target = self.base.options.target, + .global_error_set = self.base.options.module.?.global_error_set, + }; + defer context.deinit(); + + // generate the 'code' section for the function declaration + const result = context.genFunc(func) catch |err| switch (err) { + error.CodegenFail => { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, context.err_msg); + return; + }, + else => |e| return e, + }; + return self.finishUpdateDecl(decl, result); +} + // Generate code for the Decl, storing it in memory to be later written to // the file on flush(). 
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { - std.debug.assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + // TODO don't use this for non-functions const fn_data = &decl.fn_link.wasm; fn_data.functype.items.len = 0; fn_data.code.items.len = 0; @@ -218,7 +270,10 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { }, else => |e| return e, }; + return self.finishUpdateDecl(decl, result); +} +fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result) !void { const code: []const u8 = switch (result) { .appended => @as([]const u8, context.code.items), .externally_managed => |payload| payload, @@ -521,7 +576,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void { var data_offset = offset_table_size; while (cur) |cur_block| : (cur = cur_block.next) { if (cur_block.size == 0) continue; - std.debug.assert(cur_block.init); + assert(cur_block.init); const offset = (cur_block.offset_index) * ptr_width; var buf: [4]u8 = undefined; -- cgit v1.2.3 From c09b973ec25f328f5e15e9e6eed4da7f5e4634af Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jul 2021 15:45:08 -0700 Subject: stage2: compile error fixes for AIR memory layout branch Now the branch is compiling again, provided that one uses `-Dskip-non-native`, but many code paths are disabled. The code paths can now be re-enabled one at a time and updated to conform to the new AIR memory layout. 
--- src/Air.zig | 30 +- src/Compilation.zig | 2 +- src/Liveness.zig | 71 ++-- src/Module.zig | 34 +- src/Sema.zig | 986 +++++++++++++++++++++++++++++----------------------- src/codegen.zig | 159 +++++---- src/codegen/c.zig | 204 +++++------ src/link/Elf.zig | 3 + src/value.zig | 2 +- 9 files changed, 851 insertions(+), 640 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Air.zig b/src/Air.zig index e85f2e5c43..1f294c43f3 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -332,12 +332,12 @@ pub const Block = struct { body_len: u32, }; -/// Trailing is a list of `Ref` for every `args_len`. +/// Trailing is a list of `Inst.Ref` for every `args_len`. pub const Call = struct { args_len: u32, }; -/// This data is stored inside extra, with two sets of trailing `Ref`: +/// This data is stored inside extra, with two sets of trailing `Inst.Ref`: /// * 0. the then body, according to `then_body_len`. /// * 1. the else body, according to `else_body_len`. pub const CondBr = struct { @@ -355,19 +355,19 @@ pub const SwitchBr = struct { /// Trailing: /// * instruction index for each `body_len`. pub const Case = struct { - item: Ref, + item: Inst.Ref, body_len: u32, }; }; pub const StructField = struct { - struct_ptr: Ref, + struct_ptr: Inst.Ref, field_index: u32, }; /// Trailing: -/// 0. `Ref` for every outputs_len -/// 1. `Ref` for every inputs_len +/// 0. `Inst.Ref` for every outputs_len +/// 1. `Inst.Ref` for every inputs_len pub const Asm = struct { /// Index to the corresponding ZIR instruction. 
/// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and @@ -381,6 +381,24 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[body_index..][0..body_len]; } +pub fn getType(air: Air, inst: Air.Inst.Index) Type { + _ = air; + _ = inst; + @panic("TODO Air getType"); +} + +pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = air.instructions.items(.tag); + const air_datas = air.instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + /// Returns the requested data, as well as the new index which is at the start of the /// trailers for the object. pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } { diff --git a/src/Compilation.zig b/src/Compilation.zig index 90224a77d1..4a442a8b67 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2023,7 +2023,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer air.deinit(gpa); log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { diff --git a/src/Liveness.zig b/src/Liveness.zig index 1402a5997b..838f19d4a1 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -7,11 +7,13 @@ //! 
* Switch Branches const Liveness = @This(); const std = @import("std"); -const Air = @import("Air.zig"); const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const Air = @import("Air.zig"); +const Zir = @import("Zir.zig"); +const Log2Int = std.math.Log2Int; /// This array is split into sets of 4 bits per AIR instruction. /// The MSB (0bX000) is whether the instruction is unreferenced. @@ -44,7 +46,7 @@ pub const SwitchBr = struct { else_death_count: u32, }; -pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -58,6 +60,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .zir = &zir, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -74,23 +77,32 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { }; } +pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi { + const usize_index = (inst * bpi) / @bitSizeOf(usize); + return @truncate(Bpi, l.tomb_bits[usize_index] >> + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi)); +} + pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / 
bpi)) * bpi + operand); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); l.tomb_bits[usize_index] |= mask; } @@ -113,10 +125,12 @@ const Analysis = struct { tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), + zir: *const Zir, fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { const usize_index = (inst * bpi) / @bitSizeOf(usize); - a.tomb_bits[usize_index] |= tomb_bits << (inst % (@bitSizeOf(usize) / bpi)) * bpi; + a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); } fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { @@ -203,9 +217,11 @@ fn analyzeInst( return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none }); }, + .arg, .alloc, .br, .constant, + .const_ty, .breakpoint, .dbg_stmt, .varptr, @@ -255,15 +271,30 @@ fn analyzeInst( if (args.len <= bpi - 2) { var buf: [bpi - 1]Air.Inst.Ref = undefined; buf[0] = callee; - std.mem.copy(&buf, buf[1..], args); + std.mem.copy(Air.Inst.Ref, buf[1..], @bitCast([]const Air.Inst.Ref, args)); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function with many args"); + @panic("TODO: liveness analysis for function with greater than 2 args"); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none }); }, + .assembly => { + const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); + const 
extended = a.zir.instructions.items(.data)[extra.data.zir_index].extended; + const outputs_len = @truncate(u5, extended.small); + const inputs_len = @truncate(u5, extended.small >> 5); + const outputs = a.air.extra[extra.end..][0..outputs_len]; + const inputs = a.air.extra[extra.end + outputs.len ..][0..inputs_len]; + if (outputs.len + inputs.len <= bpi - 1) { + var buf: [bpi - 1]Air.Inst.Ref = undefined; + std.mem.copy(Air.Inst.Ref, &buf, @bitCast([]const Air.Inst.Ref, outputs)); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], @bitCast([]const Air.Inst.Ref, inputs)); + return trackOperands(a, new_set, inst, main_tomb, buf); + } + @panic("TODO: liveness analysis for asm with greater than 3 args"); + }, .block => { const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; @@ -287,8 +318,8 @@ fn analyzeInst( const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - var then_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer then_table.deinit(); + var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer then_table.deinit(gpa); try analyzeWithContext(a, &then_table, then_body); // Reset the table back to its state from before the branch. @@ -299,8 +330,8 @@ fn analyzeInst( } } - var else_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer else_table.deinit(); + var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer else_table.deinit(gpa); try analyzeWithContext(a, &else_table, else_body); var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa); @@ -331,7 +362,7 @@ fn analyzeInst( } // Now we have to correctly populate new_set. 
if (new_set) |ns| { - try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count())); + try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count())); var it = then_table.keyIterator(); while (it.next()) |key| { _ = ns.putAssumeCapacity(key.*, {}); @@ -344,7 +375,7 @@ fn analyzeInst( const then_death_count = @intCast(u32, then_entry_deaths.items.len); const else_death_count = @intCast(u32, else_entry_deaths.items.len); - try a.extra.ensureUnusedCapacity(std.meta.fields(@TypeOf(CondBr)).len + + try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(Air.CondBr).len + then_death_count + else_death_count); const extra_index = a.addExtraAssumeCapacity(CondBr{ .then_death_count = then_death_count, @@ -352,7 +383,7 @@ fn analyzeInst( }); a.extra.appendSliceAssumeCapacity(then_entry_deaths.items); a.extra.appendSliceAssumeCapacity(else_entry_deaths.items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); // Continue on with the instruction analysis. The following code will find the condition // instruction, and the deaths flag for the CondBr instruction will indicate whether the @@ -438,12 +469,12 @@ fn analyzeInst( }); for (case_deaths[0 .. 
case_deaths.len - 1]) |*cd| { const case_death_count = @intCast(u32, cd.items.len); - try a.extra.ensureUnusedCapacity(1 + case_death_count + else_death_count); + try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count); a.extra.appendAssumeCapacity(case_death_count); a.extra.appendSliceAssumeCapacity(cd.items); } a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); }, @@ -452,7 +483,7 @@ fn analyzeInst( fn trackOperands( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, main_tomb: bool, operands: [bpi - 1]Air.Inst.Ref, @@ -468,12 +499,12 @@ fn trackOperands( tomb_bits <<= 1; const op_int = @enumToInt(operands[i]); if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const operand: Air.Inst.Index = op_int - Air.Inst.Ref.typed_value_map.len; + const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); const prev = try table.fetchPut(gpa, operand, {}); if (prev == null) { // Death. 
tomb_bits |= 1; - if (new_set) |ns| try ns.putNoClobber(operand, {}); + if (new_set) |ns| try ns.putNoClobber(gpa, operand, {}); } } a.storeTombBits(inst, tomb_bits); diff --git a/src/Module.zig b/src/Module.zig index 5972c2bdcf..7ec9c7e93d 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1225,6 +1225,30 @@ pub const Scope = struct { pub fn getFileScope(block: *Block) *Scope.File { return block.src_decl.namespace.file_scope; } + + pub fn addTyOp( + block: *Block, + tag: Air.Inst.Tag, + ty: Type, + operand: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + const sema = block.sema; + const gpa = sema.gpa; + + try sema.air_instructions.ensureUnusedCapacity(gpa, 1); + try block.instructions.ensureUnusedCapacity(gpa, 1); + + const inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .ty_op = .{ + .ty = try sema.addType(ty), + .operand = operand, + } }, + }); + block.instructions.appendAssumeCapacity(inst); + return Sema.indexToRef(inst); + } }; }; @@ -3408,7 +3432,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen()); + const param_inst_list = try gpa.alloc(Air.Inst.Ref, fn_ty.fnParamLen()); defer gpa.free(param_inst_list); var sema: Sema = .{ @@ -3440,10 +3464,13 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer inner_block.instructions.deinit(gpa); // AIR requires the arg parameters to be the first N instructions. 
+ try inner_block.instructions.ensureTotalCapacity(gpa, param_inst_list.len); for (param_inst_list) |*param_inst, param_index| { const param_type = fn_ty.fnParamType(param_index); const ty_ref = try sema.addType(param_type); - param_inst.* = @intCast(u32, sema.air_instructions.len); + const arg_index = @intCast(u32, sema.air_instructions.len); + inner_block.instructions.appendAssumeCapacity(arg_index); + param_inst.* = Sema.indexToRef(arg_index); try sema.air_instructions.append(gpa, .{ .tag = .arg, .data = .{ @@ -3454,7 +3481,6 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { }, }); } - try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); @@ -4043,13 +4069,11 @@ pub fn floatMul( } pub fn simplePtrType( - mod: *Module, arena: *Allocator, elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size, ) Allocator.Error!Type { - _ = mod; if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) { return Type.initTag(.const_slice_u8); } diff --git a/src/Sema.zig b/src/Sema.zig index 54c42a482d..fc130cd4a4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36,7 +36,7 @@ func: ?*Module.Fn, /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function, /// > otherwise it is the number of parameters of the function. 
/// > param_count: u32 -param_inst_list: []const Air.Inst.Index, +param_inst_list: []const Air.Inst.Ref, branch_quota: u32 = 1000, branch_count: u32 = 0, /// This field is updated when a new source location becomes active, so that @@ -59,8 +59,6 @@ const TypedValue = @import("TypedValue.zig"); const Air = @import("Air.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); -const Inst = ir.Inst; -const Body = ir.Body; const trace = @import("tracy.zig").trace; const Scope = Module.Scope; const InnerError = Module.InnerError; @@ -117,7 +115,7 @@ pub fn analyzeFnBody( /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. -fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -513,7 +511,7 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } @@ -529,12 +527,12 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } //}, - else => @panic("TODO remove else prong"), + else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), }; if 
(sema.getAirType(air_inst).isNoReturn()) return always_noreturn; @@ -543,7 +541,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -598,7 +596,7 @@ fn resolveConstBool( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) !bool { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.bool); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -611,7 +609,7 @@ fn resolveConstString( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) ![]u8 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.const_slice_u8); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -619,24 +617,39 @@ fn resolveConstString( } pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); return sema.resolveAirAsType(block, src, air_inst); } -fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type { +fn resolveAirAsType( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_inst: Air.Inst.Ref, +) !Type { const wanted_type = Type.initTag(.@"type"); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toType(sema.arena); } -fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) 
!Value { - return (try sema.resolveDefinedValue(block, src, base)) orelse +fn resolveConstValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !Value { + return (try sema.resolveDefinedValue(block, src, air_ref)) orelse return sema.failWithNeededComptime(block, src); } -fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value { - if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| { +fn resolveDefinedValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !?Value { + if (try sema.resolvePossiblyUndefinedValue(block, src, air_ref)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); } @@ -649,13 +662,29 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - base: Air.Inst.Index, + air_ref: Air.Inst.Ref, ) !?Value { - if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| { + const ty = sema.getTypeOfAirRef(air_ref); + if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; } - const inst = base.castTag(.constant) orelse return null; - return inst.val; + // First section of indexes correspond to a set number of constant values. 
+ var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val; + } + i -= Air.Inst.Ref.typed_value_map.len; + + switch (sema.air_instructions.items(.tag)[i]) { + .constant => { + const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; + return sema.air_values.items[ty_pl.payload]; + }, + .const_ty => { + return sema.air_instructions.items(.data)[i].ty.toValue(undefined) catch unreachable; + }, + else => return null, + } } fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { @@ -677,7 +706,7 @@ fn resolveAlreadyCoercedInt( comptime Int: type, ) !Int { comptime assert(@typeInfo(Int).Int.bits <= 64); - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_inst); switch (@typeInfo(Int).Int.signedness) { .signed => return @intCast(Int, val.toSignedInt()), @@ -692,7 +721,7 @@ fn resolveInt( zir_ref: Zir.Inst.Ref, dest_type: Type, ) !u64 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, dest_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced); @@ -705,21 +734,21 @@ pub fn resolveInstConst( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) InnerError!TypedValue { - const air_inst = try sema.resolveInst(zir_ref); - const val = try sema.resolveConstValue(block, src, air_inst); + const air_ref = sema.resolveInst(zir_ref); + const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ - .ty = air_inst.ty, + .ty = sema.getTypeOfAirRef(air_ref), .val = val, }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = 
inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -754,7 +783,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -825,7 +854,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1022,7 +1051,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1086,7 +1115,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1106,7 +1135,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1146,7 +1175,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1154,16 +1183,16 @@ fn zirRetPtr( try sema.requireFunctionBlock(block, src); const fn_ty = sema.func.?.owner_decl.ty; const 
ret_type = fn_ty.fnReturnType(); - const ptr_type = try sema.mod.simplePtrType(sema.arena, ret_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, ret_type, true, .One); return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeRef(block, inst_data.src(), operand); } @@ -1171,7 +1200,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1187,7 +1216,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.ensureResultUsed(block, operand, src); @@ -1196,7 +1225,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I fn ensureResultUsed( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, src: LazySrcLoc, ) InnerError!void { switch (operand.ty.zigTypeTag()) { @@ -1210,7 +1239,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); switch (operand.ty.zigTypeTag()) { .ErrorSet, .ErrorUnion => 
return sema.mod.fail(&block.base, src, "error is discarded", .{}), @@ -1218,13 +1247,13 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const array_ptr = try sema.resolveInst(inst_data.operand); + const array_ptr = sema.resolveInst(inst_data.operand); const elem_ty = array_ptr.ty.elemType(); if (!elem_ty.isIndexable()) { @@ -1267,7 +1296,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[refToIndex(air_arg).?].ty_str.str = inst_data.start; return air_arg; } @@ -1275,13 +1304,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1289,7 +1318,7 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 
inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); const val_payload = try sema.arena.create(Value.Payload.ComptimeAlloc); val_payload.* = .{ @@ -1304,13 +1333,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1318,12 +1347,12 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_decl_src = inst_data.src(); const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1332,7 +1361,7 @@ fn zirAllocMut(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerErro const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); try sema.validateVarType(block, ty_src, var_type); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } @@ -1342,7 +1371,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1372,7 +1401,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const ptr_val = ptr.castTag(.constant).?.val; const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; const peer_inst_list = inferred_alloc.data.stored_inst_list.items; @@ -1385,7 +1414,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde if (var_is_mut) { try sema.validateVarType(block, ty_src, final_elem_ty); } - const final_ptr_ty = try sema.mod.simplePtrType(sema.arena, final_elem_ty, true, .One); + const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One); // Change it to a normal alloc. 
ptr.ty = final_ptr_ty; @@ -1406,7 +1435,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind const struct_obj: *Module.Struct = s: { const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); + const object_ptr = sema.resolveInst(field_ptr_extra.lhs); break :s object_ptr.ty.elemType().castTag(.@"struct").?.data; }; @@ -1535,9 +1564,9 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In // to omit it. return; } - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); - const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); // TODO detect when this store should be done at compile-time. For example, // if expressions should force it when the condition is compile-time known. const src: LazySrcLoc = .unneeded; @@ -1552,14 +1581,14 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const src: LazySrcLoc = .unneeded; const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); const inferred_alloc = ptr.castTag(.constant).?.val.castTag(.inferred_alloc).?; // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, value); // Create a runtime bitcast instruction with exactly the type the pointer wants. 
- const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); try sema.requireRuntimeBlock(block, src); const bitcasted_ptr = try block.addUnOp(src, ptr_ty, .bitcast, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); @@ -1578,8 +1607,8 @@ fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!v defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); return sema.storePtr(block, sema.src, ptr, value); } @@ -1590,18 +1619,18 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const ptr = try sema.resolveInst(extra.lhs); - const value = try sema.resolveInst(extra.rhs); + const ptr = sema.resolveInst(extra.lhs); + const value = sema.resolveInst(extra.rhs); return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const inst_data = sema.code.instructions.items(.data)[inst].param_type; - const fn_inst = try sema.resolveInst(inst_data.callee); + const fn_inst = sema.resolveInst(inst_data.callee); const param_index = inst_data.param_index; const fn_ty: Type = switch (fn_inst.ty.zigTypeTag()) { @@ -1631,7 +1660,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, src, param_type); } 
-fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1659,7 +1688,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1668,7 +1697,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1686,7 +1715,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
}); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1699,7 +1728,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1728,7 +1757,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1741,7 +1770,7 @@ fn zirCompileLog( for (args) |arg_ref, i| { if (i != 0) try writer.print(", ", .{}); - const arg = try sema.resolveInst(arg_ref); + const arg = sema.resolveInst(arg_ref); if (try sema.resolvePossiblyUndefinedValue(block, src, arg)) |val| { try writer.print("@as({}, {})", .{ arg.ty, val }); } else { @@ -1773,12 +1802,12 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); - const msg_inst = try sema.resolveInst(inst_data.operand); + const msg_inst = sema.resolveInst(inst_data.operand); return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1843,7 +1872,7 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1853,13 +1882,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1917,7 +1946,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: 
[]const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1928,7 +1957,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2088,7 +2117,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerE const inst_data = sema.code.instructions.items(.data)[inst].@"break"; const src = sema.src; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const zir_block = inst_data.block_inst; var block = start_block; @@ -2136,7 +2165,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2144,7 +2173,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2198,7 +2227,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!Air.Inst.Index { +) 
InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2208,12 +2237,12 @@ fn zirCall( const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.args_len); - const func = try sema.resolveInst(extra.data.callee); + const func = sema.resolveInst(extra.data.callee); // TODO handle function calls of generic functions - const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len); + const resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len); for (args) |zir_arg, i| { // the args are already casted to the result of a param type instruction. - resolved_args[i] = try sema.resolveInst(zir_arg); + resolved_args[i] = sema.resolveInst(zir_arg); } return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args); @@ -2222,13 +2251,13 @@ fn zirCall( fn analyzeCall( sema: *Sema, block: *Scope.Block, - func: Air.Inst.Index, + func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - args: []const Air.Inst.Index, -) InnerError!Air.Inst.Index { + args: []const Air.Inst.Ref, +) InnerError!Air.Inst.Ref { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2285,7 +2314,7 @@ fn analyzeCall( const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or func.ty.fnCallingConvention() == .Inline; - const result: Air.Inst.Index = if (is_inline_call) res: { + const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, @@ -2383,7 +2412,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index 
{ +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2395,7 +2424,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2407,7 +2436,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2415,7 +2444,7 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.constType(sema.arena, src, elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -2430,7 +2459,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2443,7 +2472,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2458,7 +2487,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2471,7 +2500,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2492,7 +2521,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.constType(sema.arena, src, err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2511,14 +2540,14 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn 
zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); const op_coerced = try sema.coerce(block, Type.initTag(.anyerror), op, operand_src); const result_ty = Type.initTag(.u16); @@ -2541,7 +2570,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, result_ty, .bitcast, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2549,7 +2578,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); if (try sema.resolveDefinedValue(block, operand_src, op)) |value| { const int = value.toUnsignedInt(); @@ -2574,7 +2603,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2583,8 +2612,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn const src: LazySrcLoc = .{ .node_offset_bin_op = 
inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); if (rhs.ty.zigTypeTag() == .Bool and lhs.ty.zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.mod.errMsg(&block.base, lhs_src, "expected error set type, found 'bool'", .{}); @@ -2664,7 +2693,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2678,15 +2707,15 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); - const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand.ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand.ty.unionHasTag()) { @@ -2760,7 +2789,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2770,7 +2799,7 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag() != .Enum) { return mod.fail(&block.base, dest_ty_src, "expected enum, found {}", .{dest_ty}); @@ -2821,12 +2850,12 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const optional_ptr = try sema.resolveInst(inst_data.operand); + const optional_ptr = sema.resolveInst(inst_data.operand); assert(optional_ptr.ty.zigTypeTag() == .Pointer); const src = inst_data.src(); @@ -2836,7 +2865,7 @@ fn zirOptionalPayloadPtr( } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try sema.mod.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); + const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); if (optional_ptr.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2864,13 +2893,13 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = 
sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const opt_type = operand.ty; if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); @@ -2902,13 +2931,13 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); @@ -2936,19 +2965,19 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); - const operand_pointer_ty = try sema.mod.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); + const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); if (operand.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2975,13 
+3004,13 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); @@ -3001,13 +3030,13 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) @@ -3035,7 +3064,7 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); if (operand.ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { @@ -3048,7 +3077,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, 
inferred_error_set: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3099,7 +3128,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3240,7 +3269,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3248,7 +3277,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air. return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3264,18 +3293,18 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const dest_type = try sema.resolveType(block, src, zir_dest_type); - const operand = try sema.resolveInst(zir_operand); + const operand = sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const ptr = try 
sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); if (ptr.ty.zigTypeTag() != .Pointer) { const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}); @@ -3287,7 +3316,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, ty, .ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3296,7 +3325,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const object_ptr = if (object.ty.zigTypeTag() == .Pointer) object else @@ -3305,7 +3334,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3314,11 +3343,11 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object_ptr = try 
sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3326,14 +3355,14 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); const object_ptr = try sema.analyzeRef(block, src, object); const result_ptr = try sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3341,12 +3370,12 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object_ptr = try sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3357,7 +3386,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_int = switch (dest_type.zigTypeTag()) { .ComptimeInt => true, @@ -3389,20 +3418,21 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); - return sema.bitcast(block, dest_type, operand); + const operand = sema.resolveInst(extra.rhs); + return sema.bitcast(block, dest_type, operand, operand_src); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3413,7 +3443,7 @@ fn 
zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_float = switch (dest_type.zigTypeTag()) { .ComptimeFloat => true, @@ -3445,22 +3475,22 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array = try sema.resolveInst(bin_inst.lhs); + const array = sema.resolveInst(bin_inst.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, sema.src, array); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const elem_index = sema.resolveInst(bin_inst.rhs); const result_ptr = try sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3468,27 +3498,27 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array = try 
sema.resolveInst(extra.lhs); + const array = sema.resolveInst(extra.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, src, array); - const elem_index = try sema.resolveInst(extra.rhs); + const elem_index = sema.resolveInst(extra.rhs); const result_ptr = try sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array_ptr = try sema.resolveInst(bin_inst.lhs); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const array_ptr = sema.resolveInst(bin_inst.lhs); + const elem_index = sema.resolveInst(bin_inst.rhs); return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3496,39 +3526,39 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const elem_index = try sema.resolveInst(extra.rhs); + const array_ptr = sema.resolveInst(extra.lhs); + const elem_index = sema.resolveInst(extra.rhs); return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceStart(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3536,10 +3566,10 @@ fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data; - const array_ptr = try 
sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); - const sentinel = try sema.resolveInst(extra.sentinel); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); + const sentinel = sema.resolveInst(extra.sentinel); return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src); } @@ -3550,7 +3580,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3569,7 +3599,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3588,7 +3618,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3597,7 +3627,7 @@ fn zirSwitchBlock( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); - const operand_ptr = try sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3621,7 +3651,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3630,7 +3660,7 @@ fn zirSwitchBlockMulti( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index); - const operand_ptr = try 
sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3651,14 +3681,14 @@ fn zirSwitchBlockMulti( fn analyzeSwitch( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, extra_end: usize, special_prong: Zir.SpecialProng, scalar_cases_len: usize, multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const gpa = sema.gpa; const mod = sema.mod; @@ -4217,7 +4247,7 @@ fn analyzeSwitch( const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); _ = try sema.resolveConstValue(&child_block, item.src, item); const cmp_ok = try case_block.addBinOp(item.src, bool_ty, .cmp_eq, operand, item); @@ -4235,8 +4265,8 @@ fn analyzeSwitch( const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; - const item_first = try sema.resolveInst(first_ref); - const item_last = try sema.resolveInst(last_ref); + const item_first = sema.resolveInst(first_ref); + const item_last = sema.resolveInst(last_ref); _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); @@ -4334,7 +4364,7 @@ fn resolveSwitchItemVal( switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, ) InnerError!TypedValue { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); // We have to avoid the other helper functions here because we cannot construct a LazySrcLoc // because we only have the switch AST node. Only if we know for sure we need to report // a compile error do we resolve the full source locations. 
@@ -4513,7 +4543,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4522,7 +4552,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -4547,7 +4577,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return mod.constBool(arena, src, false); } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4572,13 +4602,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return mod.constType(sema.arena, src, file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4587,7 +4617,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4599,8 +4629,8 @@ fn zirBitwise( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, - ir_tag: ir.Inst.Tag, -) InnerError!Air.Inst.Index { + air_tag: Air.Inst.Tag, +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4609,8 +4639,8 @@ fn zirBitwise( const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); @@ -4655,10 +4685,10 @@ fn zirBitwise( } try sema.requireRuntimeBlock(block, src); - return block.addBinOp(src, scalar_type, ir_tag, 
casted_lhs, casted_rhs); + return block.addBinOp(src, scalar_type, air_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4666,7 +4696,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4674,7 +4704,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4687,7 +4717,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4695,13 +4725,13 @@ fn zirNegate( const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(.zero); - const rhs = try sema.resolveInst(inst_data.operand); + const lhs = sema.resolveInst(.zero); + const rhs = sema.resolveInst(inst_data.operand); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4711,8 +4741,8 @@ fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, sema.src, lhs_src, rhs_src); } @@ -4721,7 +4751,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4735,12 +4765,12 @@ fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, zir_tag: Zir.Inst.Tag, - lhs: Air.Inst.Index, - rhs: Air.Inst.Index, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -4850,14 +4880,14 @@ fn analyzeArithmetic( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = 
inst_data.src(); const ptr_src: LazySrcLoc = .{ .node_offset_deref_ptr = inst_data.src_node }; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); return sema.analyzeLoad(block, src, ptr, ptr_src); } @@ -4865,7 +4895,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4915,7 +4945,7 @@ fn zirAsm( const name = sema.code.nullTerminatedString(input.data.name); _ = name; // TODO: use the name - arg.* = try sema.resolveInst(input.data.operand); + arg.* = sema.resolveInst(input.data.operand); inputs[arg_i] = sema.code.nullTerminatedString(input.data.constraint); } @@ -4949,7 +4979,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4960,8 +4990,8 @@ fn zirCmp( const src: LazySrcLoc = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); const is_equality_cmp = switch (op) { .eq, .neq => true, @@ -5047,7 +5077,7 @@ fn zirCmp( return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5057,7 +5087,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, 
inst: Zir.Inst.Index) InnerError! return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5071,7 +5101,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5080,7 +5110,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{}); } @@ -5089,12 +5119,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5137,31 +5167,31 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.mod.constType(sema.arena, src, operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand_ptr = try sema.resolveInst(inst_data.operand); + const operand_ptr = sema.resolveInst(inst_data.operand); const elem_ty = operand_ptr.ty.elemType(); return sema.mod.constType(sema.arena, src, elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5171,7 +5201,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = 
trace(@src()); defer tracy.end(); @@ -5183,20 +5213,20 @@ fn zirTypeofPeer( defer sema.gpa.free(inst_list); for (args) |arg_ref, i| { - inst_list[i] = try sema.resolveInst(arg_ref); + inst_list[i] = sema.resolveInst(arg_ref); } const result_type = try sema.resolvePeerTypes(block, src, inst_list); return sema.mod.constType(sema.arena, src, result_type); } -fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const uncasted_operand = try sema.resolveInst(inst_data.operand); + const uncasted_operand = sema.resolveInst(inst_data.operand); const bool_type = Type.initTag(.bool); const operand = try sema.coerce(block, bool_type, uncasted_operand, uncasted_operand.src); @@ -5212,16 +5242,16 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, comptime is_bool_or: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const bool_type = Type.initTag(.bool); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const uncasted_lhs = try sema.resolveInst(bin_inst.lhs); + const uncasted_lhs = sema.resolveInst(bin_inst.lhs); const lhs = try sema.coerce(block, bool_type, uncasted_lhs, uncasted_lhs.src); - const uncasted_rhs = try sema.resolveInst(bin_inst.rhs); + const uncasted_rhs = sema.resolveInst(bin_inst.rhs); const rhs = try sema.coerce(block, bool_type, uncasted_rhs, uncasted_rhs.src); if (lhs.value()) |lhs_val| { @@ -5234,7 +5264,7 @@ fn zirBoolOp( } } try sema.requireRuntimeBlock(block, src); - const tag: ir.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; + const tag: Air.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; return block.addBinOp(src, bool_type, tag, lhs, 
rhs); } @@ -5243,14 +5273,14 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const datas = sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; const src: LazySrcLoc = .unneeded; - const lhs = try sema.resolveInst(inst_data.lhs); + const lhs = sema.resolveInst(inst_data.lhs); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; @@ -5313,13 +5343,13 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNull(block, src, operand, true); } @@ -5327,33 +5357,33 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = 
sema.resolveInst(inst_data.operand); return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } @@ -5374,7 +5404,7 @@ fn zirCondbr( const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const uncasted_cond = try sema.resolveInst(extra.data.condition); + const uncasted_cond = sema.resolveInst(extra.data.condition); const cond = try sema.coerce(parent_block, Type.initTag(.bool), uncasted_cond, cond_src); if (try sema.resolveDefinedValue(parent_block, src, cond)) |cond_val| { @@ -5456,7 +5486,7 @@ fn zirRetCoerce( defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src, need_coercion); @@ -5467,7 +5497,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src, false); @@ -5476,7 +5506,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError fn 
analyzeRet( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, src: LazySrcLoc, need_coercion: bool, ) InnerError!Zir.Inst.Index { @@ -5511,7 +5541,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5532,7 +5562,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.mod.constType(sema.arena, .unneeded, ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5586,7 +5616,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5600,13 +5630,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: 
bool) InnerError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5657,7 +5687,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.failWithOwnedErrorMsg(&block.base, msg); } found_fields[field_index] = item.data.field_type; - field_inits[field_index] = try sema.resolveInst(item.data.init); + field_inits[field_index] = sema.resolveInst(item.data.init); } var root_msg: ?*Module.ErrorMsg = null; @@ -5719,7 +5749,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5727,7 +5757,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5735,7 +5765,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5743,13 +5773,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5771,7 +5801,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5780,7 +5810,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5789,91 +5819,91 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", 
.{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTagName(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = 
inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const operand_res = try sema.resolveInst(extra.rhs); + const operand_res = sema.resolveInst(extra.rhs); const operand_coerced = try sema.coerce(block, Type.initTag(.usize), operand_res, operand_src); const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5929,199 +5959,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, type_res, .bitcast, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, 
"TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicRmw(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); } -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6132,7 +6162,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6144,7 +6174,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: 
Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6210,7 +6240,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -6277,7 +6307,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6287,7 +6317,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6297,7 +6327,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6307,7 +6337,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement 
Sema.zirWasmMemorySize", .{}); @@ -6317,7 +6347,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6327,7 +6357,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6361,7 +6391,7 @@ pub const PanicId = enum { invalid_error_code, }; -fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void { +fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Ref, panic_id: PanicId) !void { const block_inst = try sema.arena.create(Inst.Block); block_inst.* = .{ .base = .{ @@ -6423,7 +6453,7 @@ fn panicWithMsg( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - msg_inst: Air.Inst.Index, + msg_inst: Air.Inst.Ref, ) !Zir.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6439,7 +6469,7 @@ fn panicWithMsg( const panic_fn = try sema.getBuiltin(block, src, "panic"); const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try mod.simplePtrType(arena, stack_trace_ty, true, .One); + const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One); const null_stack_trace = try mod.constInst(arena, src, .{ .ty = try mod.optionalType(arena, ptr_stack_trace_ty), .val = Value.initTag(.null_value), @@ 
-6500,10 +6530,10 @@ fn namedFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - object_ptr: Air.Inst.Index, + object_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; @@ -6579,7 +6609,7 @@ fn namedFieldPtr( } else (try mod.getErrorValue(field_name)).key; return mod.constInst(arena, src, .{ - .ty = try mod.simplePtrType(arena, child_type, false, .One), + .ty = try Module.simplePtrType(arena, child_type, false, .One), .val = try Value.Tag.ref_val.create( arena, try Value.Tag.@"error".create(arena, .{ @@ -6633,7 +6663,7 @@ fn namedFieldPtr( const field_index_u32 = @intCast(u32, field_index); const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32); return mod.constInst(arena, src, .{ - .ty = try mod.simplePtrType(arena, child_type, false, .One), + .ty = try Module.simplePtrType(arena, child_type, false, .One), .val = try Value.Tag.ref_val.create(arena, enum_val), }); }, @@ -6653,7 +6683,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, decl_name: []const u8, -) InnerError!?Air.Inst.Index { +) InnerError!?Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6677,11 +6707,11 @@ fn analyzeStructFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - struct_ptr: Air.Inst.Index, + struct_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6692,7 +6722,7 @@ fn analyzeStructFieldPtr( const field_index = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name); const field = struct_obj.fields.values()[field_index]; - const 
ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One); + const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { return mod.constInst(arena, src, .{ @@ -6712,11 +6742,11 @@ fn analyzeUnionFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - union_ptr: Air.Inst.Index, + union_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6728,7 +6758,7 @@ fn analyzeUnionFieldPtr( return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One); + const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| { // TODO detect inactive union field and emit compile error @@ -6749,10 +6779,10 @@ fn elemPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - elem_index: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6776,10 +6806,10 @@ fn elemPtrArray( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - elem_index: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { if (array_ptr.value()) |array_ptr_val| { if (elem_index.value()) |index_val| { // Both 
array pointer and index are compile-time known. @@ -6804,35 +6834,41 @@ fn coerce( sema: *Sema, block: *Scope.Block, dest_type: Type, - inst: Air.Inst.Index, + inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { if (dest_type.tag() == .var_args_param) { - return sema.coerceVarArgParam(block, inst); + return sema.coerceVarArgParam(block, inst, inst_src); } + + const inst_ty = sema.getTypeOfAirRef(inst); // If the types are the same, we can return the operand. - if (dest_type.eql(inst.ty)) + if (dest_type.eql(inst_ty)) return inst; - const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty); + const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty); if (in_memory_result == .ok) { - return sema.bitcast(block, dest_type, inst); + return sema.bitcast(block, dest_type, inst, inst_src); } const mod = sema.mod; const arena = sema.arena; // undefined to anything - if (inst.value()) |val| { - if (val.isUndef() or inst.ty.zigTypeTag() == .Undefined) { - return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = val }); + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { + if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) { + return sema.addConstant(dest_type, val); } } - assert(inst.ty.zigTypeTag() != .Undefined); + assert(inst_ty.zigTypeTag() != .Undefined); + + if (true) { + @panic("TODO finish AIR memory layout rework"); + } // T to E!T or E to E!T if (dest_type.tag() == .error_union) { - return try sema.wrapErrorUnion(block, dest_type, inst); + return try sema.wrapErrorUnion(block, dest_type, inst, inst_src); } // comptime known number to other number @@ -6844,14 +6880,14 @@ fn coerce( switch (dest_type.zigTypeTag()) { .Optional => { // null to ?T - if (inst.ty.zigTypeTag() == .Null) { + if (inst_ty.zigTypeTag() == .Null) { return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = Value.initTag(.null_value) }); } // T to ?T var buf: Type.Payload.ElemType = 
undefined; const child_type = dest_type.optionalChild(&buf); - if (child_type.eql(inst.ty)) { + if (child_type.eql(inst_ty)) { return sema.wrapOptional(block, dest_type, inst); } else if (try sema.coerceNum(block, child_type, inst)) |some| { return sema.wrapOptional(block, dest_type, some); @@ -6860,12 +6896,12 @@ fn coerce( .Pointer => { // Coercions where the source is a single pointer to an array. src_array_ptr: { - if (!inst.ty.isSinglePointer()) break :src_array_ptr; - const array_type = inst.ty.elemType(); + if (!inst_ty.isSinglePointer()) break :src_array_ptr; + const array_type = inst_ty.elemType(); if (array_type.zigTypeTag() != .Array) break :src_array_ptr; const array_elem_type = array_type.elemType(); - if (inst.ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr; - if (inst.ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; + if (inst_ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr; + if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; const dst_elem_type = dest_type.elemType(); switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type)) { @@ -6904,11 +6940,11 @@ fn coerce( }, .Int => { // integer widening - if (inst.ty.zigTypeTag() == .Int) { + if (inst_ty.zigTypeTag() == .Int) { assert(inst.value() == null); // handled above const dst_info = dest_type.intInfo(target); - const src_info = inst.ty.intInfo(target); + const src_info = inst_ty.intInfo(target); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (src_info.signedness == .signed and dst_info.signedness == .unsigned and dst_info.bits > src_info.bits)) @@ -6920,10 +6956,10 @@ fn coerce( }, .Float => { // float widening - if (inst.ty.zigTypeTag() == .Float) { + if (inst_ty.zigTypeTag() == .Float) { assert(inst.value() == null); // handled above - const src_bits = inst.ty.floatBits(target); + const 
src_bits = inst_ty.floatBits(target); const dst_bits = dest_type.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src); @@ -6933,7 +6969,7 @@ fn coerce( }, .Enum => { // enum literal to enum - if (inst.ty.zigTypeTag() == .EnumLiteral) { + if (inst_ty.zigTypeTag() == .EnumLiteral) { const val = try sema.resolveConstValue(block, inst_src, inst); const bytes = val.castTag(.enum_literal).?.data; const resolved_dest_type = try sema.resolveTypeFields(block, inst_src, dest_type); @@ -6965,7 +7001,7 @@ fn coerce( else => {}, } - return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst.ty }); + return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst_ty }); } const InMemoryCoercionResult = enum { @@ -6982,7 +7018,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7020,9 +7056,15 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.I return null; } -fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index { - switch (inst.ty.zigTypeTag()) { - .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}), +fn coerceVarArgParam( + sema: *Sema, + block: *Scope.Block, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Ref { + const inst_ty = sema.getTypeOfAirRef(inst); + switch (inst_ty.zigTypeTag()) { + .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst_src, "integer and float literals in var args 
function must be casted", .{}), else => {}, } // TODO implement more of this function. @@ -7033,8 +7075,8 @@ fn storePtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: Air.Inst.Index, - uncasted_value: Air.Inst.Index, + ptr: Air.Inst.Ref, + uncasted_value: Air.Inst.Ref, ) !void { if (ptr.ty.isConstPtr()) return sema.mod.fail(&block.base, src, "cannot assign to constant", .{}); @@ -7082,17 +7124,23 @@ fn storePtr( _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); } -fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { - if (inst.value()) |val| { +fn bitcast( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) InnerError!Air.Inst.Ref { + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { // Keep the comptime Value representation; take the new type. - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } // TODO validate the type size and other compile errors - try sema.requireRuntimeBlock(block, inst.src); - return block.addUnOp(inst.src, dest_type, .bitcast, inst); + try sema.requireRuntimeBlock(block, inst_src); + return block.addTyOp(.bitcast, dest_type, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7100,7 +7148,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7108,12 +7156,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7128,43 +7176,41 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl if (decl_tv.val.tag() == .variable) { return sema.analyzeVarRef(block, src, decl_tv); } - return sema.mod.constInst(sema.arena, src, .{ - .ty = try sema.mod.simplePtrType(sema.arena, decl_tv.ty, false, .One), - .val = try Value.Tag.decl_ref.create(sema.arena, decl), - }); + return 
sema.addConstant( + try Module.simplePtrType(sema.arena, decl_tv.ty, false, .One), + try Value.Tag.decl_ref.create(sema.arena, decl), + ); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Ref { const variable = tv.val.castTag(.variable).?.data; - const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); + const ty = try Module.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); if (!variable.is_mutable and !variable.is_extern) { - return sema.mod.constInst(sema.arena, src, .{ - .ty = ty, - .val = try Value.Tag.ref_val.create(sema.arena, variable.init), - }); + return sema.addConstant(ty, try Value.Tag.ref_val.create(sema.arena, variable.init)); } + const gpa = sema.gpa; try sema.requireRuntimeBlock(block, src); - const inst = try sema.arena.create(Inst.VarPtr); - inst.* = .{ - .base = .{ - .tag = .varptr, - .ty = ty, - .src = src, - }, - .variable = variable, - }; - try block.instructions.append(sema.gpa, &inst.base); - return &inst.base; + try sema.air_variables.append(gpa, variable); + const result_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .varptr, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(ty), + .payload = @intCast(u32, sema.air_variables.items.len - 1), + } }, + }); + try block.instructions.append(gpa, result_inst); + return indexToRef(result_inst); } fn analyzeRef( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, -) InnerError!Air.Inst.Index { + operand: Air.Inst.Ref, +) InnerError!Air.Inst.Ref { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7182,34 +7228,32 @@ fn analyzeLoad( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: 
Air.Inst.Index, + ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, -) InnerError!Air.Inst.Index { - const elem_ty = switch (ptr.ty.zigTypeTag()) { - .Pointer => ptr.ty.elemType(), - else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}), +) InnerError!Air.Inst.Ref { + const ptr_ty = sema.getTypeOfAirRef(ptr); + const elem_ty = switch (ptr_ty.zigTypeTag()) { + .Pointer => ptr_ty.elemType(), + else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr_ty}), }; if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| blk: { if (ptr_val.tag() == .int_u64) break :blk; // do it at runtime - return sema.mod.constInst(sema.arena, src, .{ - .ty = elem_ty, - .val = try ptr_val.pointerDeref(sema.arena), - }); + return sema.addConstant(elem_ty, try ptr_val.pointerDeref(sema.arena)); } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, elem_ty, .load, ptr); + return block.addTyOp(.load, elem_ty, ptr); } fn analyzeIsNull( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, invert_logic: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7228,8 +7272,8 @@ fn analyzeIsNonErr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, -) InnerError!Air.Inst.Index { + operand: Air.Inst.Ref, +) InnerError!Air.Inst.Ref { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); @@ -7249,12 +7293,12 @@ fn analyzeSlice( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - start: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + start: Air.Inst.Ref, end_opt: ?Air.Inst.Index, sentinel_opt: ?Air.Inst.Index, 
sentinel_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7325,10 +7369,10 @@ fn cmpNumeric( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - lhs: Air.Inst.Index, - rhs: Air.Inst.Index, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, op: std.math.CompareOperator, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { assert(lhs.ty.isNumeric()); assert(rhs.ty.isNumeric()); @@ -7494,7 +7538,7 @@ fn cmpNumeric( return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } -fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index { if (inst.value()) |val| { return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); } @@ -7503,9 +7547,15 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Ins return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); } -fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn wrapErrorUnion( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Index { const err_union = dest_type.castTag(.error_union).?; - if (inst.value()) |val| { + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { if (inst.ty.zigTypeTag() != .ErrorSet) { _ = try sema.coerce(block, err_union.data.payload, inst, inst.src); } else switch (err_union.data.error_set.tag()) { @@ -7710,7 +7760,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const std_pkg = 
mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; @@ -7938,6 +7988,68 @@ fn enumFieldSrcLoc( } else unreachable; } +/// Returns the type of the AIR instruction. +fn getTypeOfAirRef(sema: *Sema, air_ref: Air.Inst.Ref) Type { + switch (air_ref) { + .none => unreachable, + .u8_type => return Type.initTag(.u8), + .i8_type => return Type.initTag(.i8), + .u16_type => return Type.initTag(.u16), + .i16_type => return Type.initTag(.i16), + .u32_type => return Type.initTag(.u32), + .i32_type => return Type.initTag(.i32), + .u64_type => return Type.initTag(.u64), + .i64_type => return Type.initTag(.i64), + .u128_type => return Type.initTag(.u128), + .i128_type => return Type.initTag(.i128), + .usize_type => return Type.initTag(.usize), + .isize_type => return Type.initTag(.isize), + .c_short_type => return Type.initTag(.c_short), + .c_ushort_type => return Type.initTag(.c_ushort), + .c_int_type => return Type.initTag(.c_int), + .c_uint_type => return Type.initTag(.c_uint), + .c_long_type => return Type.initTag(.c_long), + .c_ulong_type => return Type.initTag(.c_ulong), + .c_longlong_type => return Type.initTag(.c_longlong), + .c_ulonglong_type => return Type.initTag(.c_ulonglong), + .c_longdouble_type => return Type.initTag(.c_longdouble), + .f16_type => return Type.initTag(.f16), + .f32_type => return Type.initTag(.f32), + .f64_type => return Type.initTag(.f64), + .f128_type => return Type.initTag(.f128), + .c_void_type => return Type.initTag(.c_void), + .bool_type => return Type.initTag(.bool), + .void_type => return Type.initTag(.void), + .type_type => return Type.initTag(.type), + .anyerror_type => return Type.initTag(.anyerror), + .comptime_int_type => return Type.initTag(.comptime_int), + .comptime_float_type => return Type.initTag(.comptime_float), + .noreturn_type => return Type.initTag(.noreturn), + .anyframe_type => return Type.initTag(.@"anyframe"), + .null_type => return Type.initTag(.@"null"), + .undefined_type => 
return Type.initTag(.@"undefined"), + .enum_literal_type => return Type.initTag(.enum_literal), + .atomic_ordering_type => return Type.initTag(.atomic_ordering), + .atomic_rmw_op_type => return Type.initTag(.atomic_rmw_op), + .calling_convention_type => return Type.initTag(.calling_convention), + .float_mode_type => return Type.initTag(.float_mode), + .reduce_op_type => return Type.initTag(.reduce_op), + .call_options_type => return Type.initTag(.call_options), + .export_options_type => return Type.initTag(.export_options), + .extern_options_type => return Type.initTag(.extern_options), + .manyptr_u8_type => return Type.initTag(.manyptr_u8), + .manyptr_const_u8_type => return Type.initTag(.manyptr_const_u8), + .fn_noreturn_no_args_type => return Type.initTag(.fn_noreturn_no_args), + .fn_void_no_args_type => return Type.initTag(.fn_void_no_args), + .fn_naked_noreturn_no_args_type => return Type.initTag(.fn_naked_noreturn_no_args), + .fn_ccc_void_no_args_type => return Type.initTag(.fn_ccc_void_no_args), + .single_const_pointer_to_comptime_int_type => return Type.initTag(.single_const_pointer_to_comptime_int), + .const_slice_u8_type => return Type.initTag(.const_slice_u8), + else => return sema.getAirType(air_ref), + } +} + +/// Asserts the AIR instruction is a `const_ty` and returns the type. 
fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { var i: usize = @enumToInt(air_ref); if (i < Air.Inst.Ref.typed_value_map.len) { @@ -8014,13 +8126,27 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } +pub fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { + const gpa = sema.gpa; + const ty_inst = try sema.addType(ty); + try sema.air_values.append(gpa, val); + try sema.air_instructions.append(gpa, .{ + .tag = .constant, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); +} + const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; -fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { +pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { return @intToEnum(Air.Inst.Ref, ref_start_index + inst); } -fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { +pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { const ref_int = @enumToInt(inst); if (ref_int >= ref_start_index) { return ref_int - ref_start_index; diff --git a/src/codegen.zig b/src/codegen.zig index a6c4b5ad3c..c27a1444ef 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -494,7 +494,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { defer function.blocks.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); - var call_info = function.resolveCallingConventionValues(src_loc.lazy, fn_type) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? 
}, else => |e| return e, }; @@ -537,7 +537,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.items.len += 4; try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); const stack_end = self.max_end_stack; if (stack_end > math.maxInt(i32)) @@ -578,7 +578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, @@ -758,11 +758,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // TODO inline this logic into every instruction - var i: ir.Inst.DeathsBitIndex = 0; - while (inst.getOperand(i)) |operand| : (i += 1) { - if (inst.operandDies(i)) - self.processDeath(operand); - } + @panic("TODO rework AIR memory layout codegen for processing deaths"); + //var i: ir.Inst.DeathsBitIndex = 0; + //while (inst.getOperand(i)) |operand| : (i += 1) { + // if (inst.operandDies(i)) + // self.processDeath(operand); + //} } } @@ -858,74 +859,76 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const air_tags = self.air.instructions.items(.tag); switch (air_tags[inst]) { // zig fmt: off - .add => return self.genAdd(inst.castTag(.add).?), - .addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - .sub => return self.genSub(inst.castTag(.sub).?), - .subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - .mul => return self.genMul(inst.castTag(.mul).?), - .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - .div => return self.genDiv(inst.castTag(.div).?), - - .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_neq => return 
self.genCmp(inst.castTag(.cmp_neq).?, .neq), - - .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - .xor => return self.genXor(inst.castTag(.xor).?), - - .alloc => return self.genAlloc(inst.castTag(.alloc).?), - .arg => return self.genArg(inst.castTag(.arg).?), - .assembly => return self.genAsm(inst.castTag(.assembly).?), - .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - .block => return self.genBlock(inst.castTag(.block).?), - .br => return self.genBr(inst.castTag(.br).?), - .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - .breakpoint => return self.genBreakpoint(inst.src), - .call => return self.genCall(inst.castTag(.call).?), - .cond_br => return self.genCondBr(inst.castTag(.condbr).?), - .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - .intcast => return self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), - .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - .is_null => return self.genIsNull(inst.castTag(.is_null).?), - .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - .is_err => return self.genIsErr(inst.castTag(.is_err).?), - .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - .load => return self.genLoad(inst.castTag(.load).?), - .loop => return self.genLoop(inst.castTag(.loop).?), - .not => return self.genNot(inst.castTag(.not).?), - .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - .ref => return 
self.genRef(inst.castTag(.ref).?), - .ret => return self.genRet(inst.castTag(.ret).?), - .store => return self.genStore(inst.castTag(.store).?), - .struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .switchbr => return self.genSwitch(inst.castTag(.switchbr).?), - .varptr => return self.genVarPtr(inst.castTag(.varptr).?), - - .constant => unreachable, // excluded from function bodies - .unreach => return MCValue{ .unreach = {} }, - - .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - .unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - - .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), - .wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), + //.add => return self.genAdd(inst.castTag(.add).?), + //.addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), + //.sub => return self.genSub(inst.castTag(.sub).?), + //.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), + //.mul => return self.genMul(inst.castTag(.mul).?), + //.mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), + //.div => return self.genDiv(inst.castTag(.div).?), + + //.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), + //.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), + //.cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), + //.cmp_gte => return 
self.genCmp(inst.castTag(.cmp_gte).?, .gte), + //.cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), + //.cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), + + //.bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), + //.bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), + //.bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), + //.bit_or => return self.genBitOr(inst.castTag(.bit_or).?), + //.xor => return self.genXor(inst.castTag(.xor).?), + + //.alloc => return self.genAlloc(inst.castTag(.alloc).?), + //.arg => return self.genArg(inst.castTag(.arg).?), + //.assembly => return self.genAsm(inst.castTag(.assembly).?), + //.bitcast => return self.genBitCast(inst.castTag(.bitcast).?), + //.block => return self.genBlock(inst.castTag(.block).?), + //.br => return self.genBr(inst.castTag(.br).?), + //.br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), + //.breakpoint => return self.genBreakpoint(inst.src), + //.call => return self.genCall(inst.castTag(.call).?), + //.cond_br => return self.genCondBr(inst.castTag(.condbr).?), + //.dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), + //.floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), + //.intcast => return self.genIntCast(inst.castTag(.intcast).?), + //.is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), + //.is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), + //.is_null => return self.genIsNull(inst.castTag(.is_null).?), + //.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), + //.is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), + //.is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), + //.is_err => return self.genIsErr(inst.castTag(.is_err).?), + //.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), + //.load => return self.genLoad(inst.castTag(.load).?), + //.loop => return 
self.genLoop(inst.castTag(.loop).?), + //.not => return self.genNot(inst.castTag(.not).?), + //.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), + //.ref => return self.genRef(inst.castTag(.ref).?), + //.ret => return self.genRet(inst.castTag(.ret).?), + //.store => return self.genStore(inst.castTag(.store).?), + //.struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + //.switch_br => return self.genSwitch(inst.castTag(.switchbr).?), + //.varptr => return self.genVarPtr(inst.castTag(.varptr).?), + + //.constant => unreachable, // excluded from function bodies + //.unreach => return MCValue{ .unreach = {} }, + + //.optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), + //.optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), + //.unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), + //.unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), + + //.wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), + //.wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), // zig fmt: on + + else => @panic("TODO finish air memory layout branch, more codegen.zig instructions"), } } @@ -4785,14 +4788,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; } - fn fail(self: *Self, src: LazySrcLoc, comptime format: []const u8, args: anytype) InnerError { + fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { @setCold(true); assert(self.err_msg == null); - const 
src_loc = if (src != .unneeded) - src.toSrcLocWithDecl(self.mod_fn.owner_decl) - else - self.src_loc; - self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src_loc, format, args); + self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args); return error.CodegenFail; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4743494f35..0ee6972654 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -25,7 +25,7 @@ pub const CValue = union(enum) { /// Index into local_names, but take the address. local_ref: usize, /// A constant instruction, to be rendered inline. - constant: *Inst, + constant: Air.Inst.Index, /// Index into the parameters arg: usize, /// By-value @@ -99,7 +99,7 @@ pub const Object = struct { gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, - blocks: std.AutoHashMapUnmanaged(*ir.Inst.Block, BlockData) = .{}, + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, next_arg_index: usize = 0, next_local_index: usize = 0, next_block_index: usize = 0, @@ -133,7 +133,12 @@ pub const Object = struct { .none => unreachable, .local => |i| return w.print("t{d}", .{i}), .local_ref => |i| return w.print("&t{d}", .{i}), - .constant => |inst| return o.dg.renderValue(w, inst.ty, inst.value().?), + .constant => |inst| { + const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; + const ty = o.air.getRefType(ty_pl.ty); + const val = o.air.values[ty_pl.payload]; + return o.dg.renderValue(w, ty, val); + }, .arg => |i| return w.print("a{d}", .{i}), .decl => |decl| return w.writeAll(mem.span(decl.name)), .decl_ref => |decl| return w.print("&{s}", .{decl.name}), @@ -213,8 +218,9 @@ pub const DeclGen = struct { error_msg: ?*Module.ErrorMsg, typedefs: TypedefMap, - fn fail(dg: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { + fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { 
@setCold(true); + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(dg.decl); dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args); return error.AnalysisFail; @@ -230,7 +236,7 @@ pub const DeclGen = struct { // This should lower to 0xaa bytes in safe modes, and for unsafe modes should // lower to leaving variables uninitialized (that might need to be implemented // outside of this function). - return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement renderValue undef", .{}); + return dg.fail("TODO: C backend: implement renderValue undef", .{}); } switch (t.zigTypeTag()) { .Int => { @@ -440,7 +446,7 @@ pub const DeclGen = struct { }, else => unreachable, }, - else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{ + else => |e| return dg.fail("TODO: C backend: implement value {s}", .{ @tagName(e), }), } @@ -519,14 +525,14 @@ pub const DeclGen = struct { break; } } else { - return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement integer types larger than 128 bits", .{}); + return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); } }, else => unreachable, } }, - .Float => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Float", .{}), + .Float => return dg.fail("TODO: C backend: implement type Float", .{}), .Pointer => { if (t.isSlice()) { @@ -681,7 +687,7 @@ pub const DeclGen = struct { try dg.renderType(w, int_tag_ty); }, - .Union => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Union", .{}), + .Union => return dg.fail("TODO: C backend: implement type Union", .{}), .Fn => { try dg.renderType(w, t.fnReturnType()); try w.writeAll(" (*)("); @@ -704,10 +710,10 @@ pub const DeclGen = struct { } try w.writeByte(')'); }, - .Opaque => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Opaque", .{}), - .Frame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: 
implement type Frame", .{}), - .AnyFrame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type AnyFrame", .{}), - .Vector => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Vector", .{}), + .Opaque => return dg.fail("TODO: C backend: implement type Opaque", .{}), + .Frame => return dg.fail("TODO: C backend: implement type Frame", .{}), + .AnyFrame => return dg.fail("TODO: C backend: implement type AnyFrame", .{}), + .Vector => return dg.fail("TODO: C backend: implement type Vector", .{}), .Null, .Undefined, @@ -760,7 +766,8 @@ pub fn genDecl(o: *Object) !void { try o.dg.renderFunctionSignature(o.writer(), is_global); try o.writer().writeByte(' '); - try genBody(o, func.body); + const main_body = o.air.getMainBody(); + try genBody(o, main_body); try o.indent_writer.insertNewline(); return; @@ -833,9 +840,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { } } -pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!void { +fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { const writer = o.writer(); - if (body.instructions.len == 0) { + if (body.len == 0) { try writer.writeAll("{}"); return; } @@ -843,82 +850,85 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi try writer.writeAll("{\n"); o.indent_writer.pushIndent(); - for (body.instructions) |inst| { - const result_value = switch (inst.tag) { - // TODO use a different strategy for add that communicates to the optimizer - // that wrapping is UB. - .add => try genBinOp(o, inst.castTag(.add).?, " + "), - .addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), - // TODO use a different strategy for sub that communicates to the optimizer - // that wrapping is UB. 
- .sub => try genBinOp(o, inst.castTag(.sub).?, " - "), - .subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), - // TODO use a different strategy for mul that communicates to the optimizer - // that wrapping is UB. - .mul => try genBinOp(o, inst.castTag(.sub).?, " * "), - .mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), - // TODO use a different strategy for div that communicates to the optimizer - // that wrapping is UB. - .div => try genBinOp(o, inst.castTag(.div).?, " / "), - - .constant => unreachable, // excluded from function bodies - .alloc => try genAlloc(o, inst.castTag(.alloc).?), - .arg => genArg(o), - .assembly => try genAsm(o, inst.castTag(.assembly).?), - .block => try genBlock(o, inst.castTag(.block).?), - .bitcast => try genBitcast(o, inst.castTag(.bitcast).?), - .breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), - .call => try genCall(o, inst.castTag(.call).?), - .cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), - .cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), - .cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), - .cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), - .cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), - .cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), - .dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), - .intcast => try genIntCast(o, inst.castTag(.intcast).?), - .load => try genLoad(o, inst.castTag(.load).?), - .ret => try genRet(o, inst.castTag(.ret).?), - .retvoid => try genRetVoid(o), - .store => try genStore(o, inst.castTag(.store).?), - .unreach => try genUnreach(o, inst.castTag(.unreach).?), - .loop => try genLoop(o, inst.castTag(.loop).?), - .condbr => try genCondBr(o, inst.castTag(.condbr).?), - .br => try genBr(o, inst.castTag(.br).?), - .br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), - .switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), - // bool_and and bool_or are 
non-short-circuit operations - .bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), - .bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), - .bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "), - .bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), - .xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), - .not => try genUnOp(o, inst.castTag(.not).?, "!"), - .is_null => try genIsNull(o, inst.castTag(.is_null).?), - .is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), - .is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), - .is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), - .wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), - .optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), - .optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), - .ref => try genRef(o, inst.castTag(.ref).?), - .struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), - - .is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), - .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), - .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), - .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), - - .unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), - .unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), - .unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), - .wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), - .br_block_flat => return o.dg.fail(.{ 
.node_offset = 0 }, "TODO: C backend: implement codegen for br_block_flat", .{}), - .ptrtoint => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for ptrtoint", .{}), - .varptr => try genVarPtr(o, inst.castTag(.varptr).?), - .floatcast => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for floatcast", .{}), + const air_tags = o.air.instructions.items(.tag); + + for (body) |inst| { + const result_value = switch (air_tags[inst]) { + //// TODO use a different strategy for add that communicates to the optimizer + //// that wrapping is UB. + //.add => try genBinOp(o, inst.castTag(.add).?, " + "), + //.addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), + //// TODO use a different strategy for sub that communicates to the optimizer + //// that wrapping is UB. + //.sub => try genBinOp(o, inst.castTag(.sub).?, " - "), + //.subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), + //// TODO use a different strategy for mul that communicates to the optimizer + //// that wrapping is UB. + //.mul => try genBinOp(o, inst.castTag(.sub).?, " * "), + //.mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), + //// TODO use a different strategy for div that communicates to the optimizer + //// that wrapping is UB. 
+ //.div => try genBinOp(o, inst.castTag(.div).?, " / "), + + //.constant => unreachable, // excluded from function bodies + //.alloc => try genAlloc(o, inst.castTag(.alloc).?), + //.arg => genArg(o), + //.assembly => try genAsm(o, inst.castTag(.assembly).?), + //.block => try genBlock(o, inst.castTag(.block).?), + //.bitcast => try genBitcast(o, inst.castTag(.bitcast).?), + //.breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), + //.call => try genCall(o, inst.castTag(.call).?), + //.cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), + //.cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), + //.cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), + //.cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), + //.cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), + //.cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), + //.dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), + //.intcast => try genIntCast(o, inst.castTag(.intcast).?), + //.load => try genLoad(o, inst.castTag(.load).?), + //.ret => try genRet(o, inst.castTag(.ret).?), + //.retvoid => try genRetVoid(o), + //.store => try genStore(o, inst.castTag(.store).?), + //.unreach => try genUnreach(o, inst.castTag(.unreach).?), + //.loop => try genLoop(o, inst.castTag(.loop).?), + //.condbr => try genCondBr(o, inst.castTag(.condbr).?), + //.br => try genBr(o, inst.castTag(.br).?), + //.br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), + //.switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), + //// bool_and and bool_or are non-short-circuit operations + //.bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), + //.bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), + //.bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "), + //.bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), + //.xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), + //.not => try genUnOp(o, inst.castTag(.not).?, "!"), + 
//.is_null => try genIsNull(o, inst.castTag(.is_null).?), + //.is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), + //.is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), + //.is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), + //.wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), + //.optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), + //.optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), + //.ref => try genRef(o, inst.castTag(.ref).?), + //.struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), + + //.is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), + //.is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), + //.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), + //.is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), + + //.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), + //.unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), + //.wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), + //.br_block_flat => return o.dg.fail("TODO: C backend: implement codegen for br_block_flat", .{}), + //.ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}), + //.varptr => try genVarPtr(o, inst.castTag(.varptr).?), + //.floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}), + else => return o.dg.fail("TODO: C backend: rework AIR memory 
layout", .{}), }; switch (result_value) { .none => {}, @@ -1060,7 +1070,7 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c } if (bits > 64) { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: genWrapOp for large integers", .{}); + return o.dg.fail("TODO: C backend: genWrapOp for large integers", .{}); } var min_buf: [80]u8 = undefined; @@ -1227,7 +1237,7 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue { try writer.writeAll(");\n"); return result_local; } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement function pointers", .{}); + return o.dg.fail("TODO: C backend: implement function pointers", .{}); } } @@ -1390,13 +1400,13 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { try o.writeCValue(writer, arg_c_value); try writer.writeAll(";\n"); } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO non-explicit inline asm regs", .{}); + return o.dg.fail("TODO non-explicit inline asm regs", .{}); } } const volatile_string: []const u8 = if (as.is_volatile) "volatile " else ""; try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source }); if (as.output_constraint) |_| { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: CBE inline asm output", .{}); + return o.dg.fail("TODO: CBE inline asm output", .{}); } if (as.inputs.len > 0) { if (as.output_constraint == null) { @@ -1421,7 +1431,7 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { if (as.base.isUnused()) return CValue.none; - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: inline asm expression result used", .{}); + return o.dg.fail("TODO: C backend: inline asm expression result used", .{}); } fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 0d05b97846..c93f04f618 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2519,6 +2519,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var code_buffer = 
std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); defer dbg_info_buffer.deinit(); diff --git a/src/value.zig b/src/value.zig index 48cd6fffc4..0f7194d8c1 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1700,7 +1700,7 @@ pub const Value = extern union { /// peer type resolution. This is stored in a separate list so that /// the items are contiguous in memory and thus can be passed to /// `Module.resolvePeerTypes`. - stored_inst_list: std.ArrayListUnmanaged(*ir.Inst) = .{}, + stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Index) = .{}, }, }; -- cgit v1.2.3 From dbd3529d1fa02d5e720df0fbf2436d646f5a4f57 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jul 2021 21:49:22 -0700 Subject: Sema: first pass reworking for AIR memory layout --- BRANCH_TODO | 89 ------------ src/Module.zig | 50 +++++-- src/Sema.zig | 444 ++++++++++++++++++++++++++++++++------------------------- 3 files changed, 286 insertions(+), 297 deletions(-) (limited to 'src/Module.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index c7f3923559..aaba8b70b3 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -569,95 +569,6 @@ const DumpAir = struct { } }; -pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst { - _ = mod; - const const_inst = try arena.create(ir.Inst.Constant); - const_inst.* = .{ - .base = .{ - .tag = ir.Inst.Constant.base_tag, - .ty = typed_value.ty, - .src = src, - }, - .val = typed_value.val, - }; - return &const_inst.base; -} - -pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.type), - .val = try ty.toValue(arena), - }); -} - -pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return mod.constInst(arena, src, .{ 
- .ty = Type.initTag(.void), - .val = Value.initTag(.void_value), - }); -} - -pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.noreturn), - .val = Value.initTag(.unreachable_value), - }); -} - -pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = Value.initTag(.undef), - }); -} - -pub fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.bool), - .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)], - }); -} - -pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_u64.create(arena, int), - }); -} - -pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_i64.create(arena, int), - }); -} - -pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst { - if (big_int.positive) { - if (big_int.to(u64)) |x| { - return mod.constIntUnsigned(arena, src, ty, x); - } else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_positive.create(arena, big_int.limbs), - }); - } else { - if (big_int.to(i64)) |x| { - return mod.constIntSigned(arena, src, ty, x); - } else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs), - }); - } -} - pub fn 
dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { const zir_module = scope.namespace(); const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); diff --git a/src/Module.zig b/src/Module.zig index 7ec9c7e93d..3ce3c47f14 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1232,22 +1232,52 @@ pub const Scope = struct { ty: Type, operand: Air.Inst.Ref, ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .{ .ty_op = .{ + .ty = try block.sema.addType(ty), + .operand = operand, + } }, + }); + } + + pub fn addUnOp( + block: *Block, + tag: Air.Inst.Tag, + operand: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .{ .un_op = operand }, + }); + } + + pub fn addBinOp( + block: *Block, + tag: Air.Inst.Tag, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .{ .bin_op = .{ + .lhs = lhs, + .rhs = rhs, + } }, + }); + } + + pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref { const sema = block.sema; const gpa = sema.gpa; try sema.air_instructions.ensureUnusedCapacity(gpa, 1); try block.instructions.ensureUnusedCapacity(gpa, 1); - const inst = @intCast(Air.Inst.Index, sema.air_instructions.len); - sema.air_instructions.appendAssumeCapacity(.{ - .tag = tag, - .data = .{ .ty_op = .{ - .ty = try sema.addType(ty), - .operand = operand, - } }, - }); - block.instructions.appendAssumeCapacity(inst); - return Sema.indexToRef(inst); + const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); + sema.air_instructions.appendAssumeCapacity(inst); + block.instructions.appendAssumeCapacity(result_index); + return Sema.indexToRef(result_index); } }; }; diff --git a/src/Sema.zig b/src/Sema.zig index fc130cd4a4..829dd843cc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -534,7 +534,7 @@ pub fn analyzeBody( //}, else => @panic("TODO finish updating Sema 
for AIR memory layout changes and then remove this else prong"), }; - if (sema.getAirType(air_inst).isNoReturn()) + if (sema.getTypeOf(air_inst).isNoReturn()) return always_noreturn; try map.put(sema.gpa, inst, air_inst); i += 1; @@ -664,7 +664,7 @@ fn resolvePossiblyUndefinedValue( src: LazySrcLoc, air_ref: Air.Inst.Ref, ) !?Value { - const ty = sema.getTypeOfAirRef(air_ref); + const ty = sema.getTypeOf(air_ref); if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; } @@ -737,7 +737,7 @@ pub fn resolveInstConst( const air_ref = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ - .ty = sema.getTypeOfAirRef(air_ref), + .ty = sema.getTypeOf(air_ref), .val = val, }; } @@ -1208,7 +1208,7 @@ fn zirRetType( try sema.requireFunctionBlock(block, src); const fn_ty = sema.func.?.owner_decl.ty; const ret_type = fn_ty.fnReturnType(); - return sema.mod.constType(sema.arena, src, ret_type); + return sema.addType(ret_type); } fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { @@ -1571,7 +1571,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In // if expressions should force it when the condition is compile-time known. const src: LazySrcLoc = .unneeded; try sema.requireRuntimeBlock(block, src); - const bitcasted_ptr = try block.addUnOp(src, ptr_ty, .bitcast, ptr); + const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); } @@ -1590,7 +1590,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) // Create a runtime bitcast instruction with exactly the type the pointer wants. 
const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); try sema.requireRuntimeBlock(block, src); - const bitcasted_ptr = try block.addUnOp(src, ptr_ty, .bitcast, ptr); + const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); } @@ -1646,7 +1646,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const param_count = fn_ty.fnParamLen(); if (param_index >= param_count) { if (fn_ty.fnIsVarArgs()) { - return sema.mod.constType(sema.arena, src, Type.initTag(.var_args_param)); + return sema.addType(Type.initTag(.var_args_param)); } return sema.mod.fail(&block.base, src, "arg index {d} out of bounds; '{}' has {d} argument(s)", .{ param_index, @@ -1657,7 +1657,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr // TODO support generic functions const param_type = fn_ty.fnParamType(param_index); - return sema.mod.constType(sema.arena, src, param_type); + return sema.addType(param_type); } fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -1694,7 +1694,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air defer tracy.end(); const int = sema.code.instructions.items(.data)[inst].int; - return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); + return sema.addIntUnsigned(Type.initTag(.comptime_int), int); } fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2418,10 +2418,9 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError defer tracy.end(); const int_type = sema.code.instructions.items(.data)[inst].int_type; - const src = int_type.src(); const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count); - return sema.mod.constType(sema.arena, src, ty); + return sema.addType(ty); } fn zirOptionalType(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2433,7 +2432,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner const child_type = try sema.resolveType(block, src, inst_data.operand); const opt_type = try sema.mod.optionalType(sema.arena, child_type); - return sema.mod.constType(sema.arena, src, opt_type); + return sema.addType(opt_type); } fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2441,12 +2440,11 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); const elem_type = array_type.elemType(); - return sema.mod.constType(sema.arena, src, elem_type); + return sema.addType(elem_type); } fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -2456,7 +2454,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr .len = len, .elem_type = elem_type, }); - return sema.mod.constType(sema.arena, src, vector_type); + return sema.addType(vector_type); } fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2469,7 +2467,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const elem_type = try sema.resolveType(block, .unneeded, bin_inst.rhs); const array_ty = try sema.mod.arrayType(sema.arena, len.val.toUnsignedInt(), null, elem_type); - return sema.mod.constType(sema.arena, .unneeded, array_ty); + return sema.addType(array_ty); } fn 
zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2484,7 +2482,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const elem_type = try sema.resolveType(block, .unneeded, extra.elem_type); const array_ty = try sema.mod.arrayType(sema.arena, len.val.toUnsignedInt(), sentinel.val, elem_type); - return sema.mod.constType(sema.arena, .unneeded, array_ty); + return sema.addType(array_ty); } fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2492,12 +2490,11 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node }; const return_type = try sema.resolveType(block, operand_src, inst_data.operand); const anyframe_type = try Type.Tag.anyframe_T.create(sema.arena, return_type); - return sema.mod.constType(sema.arena, src, anyframe_type); + return sema.addType(anyframe_type); } fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2506,7 +2503,6 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const error_union = try sema.resolveType(block, lhs_src, extra.lhs); @@ -2518,7 +2514,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } const err_union_ty = try sema.mod.errorUnionType(sema.arena, error_union, payload); - 
return sema.mod.constType(sema.arena, src, err_union_ty); + return sema.addType(err_union_ty); } fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2553,7 +2549,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr if (try sema.resolvePossiblyUndefinedValue(block, src, op_coerced)) |val| { if (val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, result_ty); + return sema.addConstUndef(result_ty); } const payload = try sema.arena.create(Value.Payload.U64); payload.* = .{ @@ -2567,7 +2563,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, result_ty, .bitcast, op_coerced); + return block.addTyOp(.bitcast, result_ty, op_coerced); } fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2600,7 +2596,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr // const is_gt_max = @panic("TODO get max errors in compilation"); // try sema.addSafetyCheck(block, is_gt_max, .invalid_error_code); } - return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); + return block.addTyOp(.bitcast, Type.initTag(.anyerror), op); } fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2614,7 +2610,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); - if (rhs.ty.zigTypeTag() == .Bool and lhs.ty.zigTypeTag() == .Bool) { + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); + if (rhs_ty.zigTypeTag() == .Bool and lhs_ty.zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.mod.errMsg(&block.base, lhs_src, "expected error set type, found 
'bool'", .{}); errdefer msg.destroy(sema.gpa); @@ -2623,8 +2621,6 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }; return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } - const rhs_ty = try sema.resolveAirAsType(block, rhs_src, rhs); - const lhs_ty = try sema.resolveAirAsType(block, lhs_src, lhs); if (rhs_ty.zigTypeTag() != .ErrorSet) return sema.mod.fail(&block.base, rhs_src, "expected error set type, found {}", .{rhs_ty}); if (lhs_ty.zigTypeTag() != .ErrorSet) @@ -2786,7 +2782,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); + return block.addTyOp(.bitcast, int_tag_ty, enum_tag); } fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2841,7 +2837,7 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, dest_ty, .bitcast, operand); + return block.addTyOp(.bitcast, dest_ty, operand); } /// Pointer in, pointer out. @@ -2881,10 +2877,10 @@ fn zirOptionalPayloadPtr( try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { - const is_non_null = try block.addUnOp(src, Type.initTag(.bool), .is_non_null_ptr, optional_ptr); + const is_non_null = try block.addUnOp(.is_non_null_ptr, optional_ptr); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } - return block.addUnOp(src, child_pointer, .optional_payload_ptr, optional_ptr); + return block.addTyOp(.optional_payload_ptr, child_pointer, optional_ptr); } /// Value in, value out. 
@@ -2919,10 +2915,10 @@ fn zirOptionalPayload( try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { - const is_non_null = try block.addUnOp(src, Type.initTag(.bool), .is_non_null, operand); + const is_non_null = try block.addUnOp(.is_non_null, operand); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } - return block.addUnOp(src, child_type, .optional_payload, operand); + return block.addTyOp(.optional_payload, child_type, operand); } /// Value in, value out @@ -2953,10 +2949,11 @@ fn zirErrUnionPayload( } try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { - const is_non_err = try block.addUnOp(src, Type.initTag(.bool), .is_err, operand); + const is_non_err = try block.addUnOp(.is_err, operand); try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); } - return block.addUnOp(src, operand.ty.castTag(.error_union).?.data.payload, .unwrap_errunion_payload, operand); + const result_ty = operand.ty.castTag(.error_union).?.data.payload; + return block.addTyOp(.unwrap_errunion_payload, result_ty, operand); } /// Pointer in, pointer out. 
@@ -2997,10 +2994,10 @@ fn zirErrUnionPayloadPtr( try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { - const is_non_err = try block.addUnOp(src, Type.initTag(.bool), .is_err, operand); + const is_non_err = try block.addUnOp(.is_err, operand); try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); } - return block.addUnOp(src, operand_pointer_ty, .unwrap_errunion_payload_ptr, operand); + return block.addTyOp(.unwrap_errunion_payload_ptr, operand_pointer_ty, operand); } /// Value in, value out @@ -3026,7 +3023,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, result_ty, .unwrap_errunion_err, operand); + return block.addTyOp(.unwrap_errunion_err, result_ty, operand); } /// Pointer in, value out @@ -3055,7 +3052,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, result_ty, .unwrap_errunion_err_ptr, operand); + return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand); } fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { @@ -3241,7 +3238,7 @@ fn funcCommon( } if (body_inst == 0) { - return mod.constType(sema.arena, src, fn_ty); + return sema.addType(fn_ty); } const is_inline = fn_ty.fnCallingConvention() == .Inline; @@ -3312,8 +3309,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro // TODO handle known-pointer-address const src = inst_data.src(); try sema.requireRuntimeBlock(block, src); - const ty = Type.initTag(.usize); - return block.addUnOp(src, ty, .ptrtoint, ptr); + return block.addUnOp(.ptrtoint, ptr); } fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4244,15 +4240,14 @@ fn analyzeSwitch( case_block.instructions.shrinkRetainingCapacity(0); var any_ok: ?Air.Inst.Index 
= null; - const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { const item = sema.resolveInst(item_ref); _ = try sema.resolveConstValue(&child_block, item.src, item); - const cmp_ok = try case_block.addBinOp(item.src, bool_ty, .cmp_eq, operand, item); + const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); if (any_ok) |some| { - any_ok = try case_block.addBinOp(item.src, bool_ty, .bool_or, some, cmp_ok); + any_ok = try case_block.addBinOp(.bool_or, some, cmp_ok); } else { any_ok = cmp_ok; } @@ -4271,32 +4266,24 @@ fn analyzeSwitch( _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); - const range_src = item_first.src; - // operand >= first and operand <= last const range_first_ok = try case_block.addBinOp( - item_first.src, - bool_ty, .cmp_gte, operand, item_first, ); const range_last_ok = try case_block.addBinOp( - item_last.src, - bool_ty, .cmp_lte, operand, item_last, ); const range_ok = try case_block.addBinOp( - range_src, - bool_ty, .bool_and, range_first_ok, range_last_ok, ); if (any_ok) |some| { - any_ok = try case_block.addBinOp(range_src, bool_ty, .bool_or, some, range_ok); + any_ok = try case_block.addBinOp(.bool_or, some, range_ok); } else { any_ok = range_ok; } @@ -4555,13 +4542,11 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const src = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const container_type = try sema.resolveType(block, lhs_src, extra.lhs); const decl_name = try sema.resolveConstString(block, rhs_src, 
extra.rhs); const mod = sema.mod; - const arena = sema.arena; const namespace = container_type.getNamespace() orelse return mod.fail( &block.base, @@ -4571,10 +4556,10 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError ); if (try sema.lookupInNamespace(namespace, decl_name)) |decl| { if (decl.is_pub or decl.namespace.file_scope == block.base.namespace().file_scope) { - return mod.constBool(arena, src, true); + return Air.Inst.Ref.bool_true; } } - return mod.constBool(arena, src, false); + return Air.Inst.Ref.bool_false; } fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4599,7 +4584,7 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! try mod.semaFile(result.file); const file_root_decl = result.file.root_decl.?; try sema.mod.declareDeclDependency(sema.owner_decl, file_root_decl); - return mod.constType(sema.arena, src, file_root_decl.ty); + return sema.addType(file_root_decl.ty); } fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4641,6 +4626,8 @@ fn zirBitwise( const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); @@ -4654,38 +4641,38 @@ fn zirBitwise( const scalar_tag = scalar_type.zigTypeTag(); - if (lhs.ty.zigTypeTag() == .Vector and rhs.ty.zigTypeTag() == .Vector) { - if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) { + if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ - lhs.ty.arrayLen(), - rhs.ty.arrayLen(), + lhs_ty.arrayLen(), + 
rhs_ty.arrayLen(), }); } return sema.mod.fail(&block.base, src, "TODO implement support for vectors in zirBitwise", .{}); - } else if (lhs.ty.zigTypeTag() == .Vector or rhs.ty.zigTypeTag() == .Vector) { + } else if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ - lhs.ty, - rhs.ty, + lhs_ty, + rhs_ty, }); } const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; if (!is_int) { - return sema.mod.fail(&block.base, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs.ty.zigTypeTag()), @tagName(rhs.ty.zigTypeTag()) }); + return sema.mod.fail(&block.base, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); } if (casted_lhs.value()) |lhs_val| { if (casted_rhs.value()) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, resolved_type); + return sema.addConstUndef(resolved_type); } return sema.mod.fail(&block.base, src, "TODO implement comptime bitwise operations", .{}); } } try sema.requireRuntimeBlock(block, src); - return block.addBinOp(src, scalar_type, air_tag, casted_lhs, casted_rhs); + return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4783,18 +4770,18 @@ fn analyzeArithmetic( const scalar_tag = scalar_type.zigTypeTag(); - if (lhs.ty.zigTypeTag() == .Vector and rhs.ty.zigTypeTag() == .Vector) { - if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) { + if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ - lhs.ty.arrayLen(), - rhs.ty.arrayLen(), + lhs_ty.arrayLen(), + rhs_ty.arrayLen(), }); } return 
sema.mod.fail(&block.base, src, "TODO implement support for vectors in zirBinOp", .{}); - } else if (lhs.ty.zigTypeTag() == .Vector or rhs.ty.zigTypeTag() == .Vector) { + } else if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ - lhs.ty, - rhs.ty, + lhs_ty, + rhs_ty, }); } @@ -4802,13 +4789,13 @@ fn analyzeArithmetic( const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat; if (!is_int and !(is_float and floatOpAllowed(zir_tag))) { - return sema.mod.fail(&block.base, src, "invalid operands to binary expression: '{s}' and '{s}'", .{ @tagName(lhs.ty.zigTypeTag()), @tagName(rhs.ty.zigTypeTag()) }); + return sema.mod.fail(&block.base, src, "invalid operands to binary expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); } if (casted_lhs.value()) |lhs_val| { if (casted_rhs.value()) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, resolved_type); + return sema.addConstUndef(resolved_type); } // incase rhs is 0, simply return lhs without doing any calculations // TODO Once division is implemented we should throw an error when dividing by 0. 
@@ -4866,7 +4853,7 @@ fn analyzeArithmetic( } try sema.requireRuntimeBlock(block, src); - const ir_tag: Inst.Tag = switch (zir_tag) { + const air_tag: Air.Inst.Tag = switch (zir_tag) { .add => .add, .addwrap => .addwrap, .sub => .sub, @@ -4877,7 +4864,7 @@ fn analyzeArithmetic( else => return sema.mod.fail(&block.base, src, "TODO implement arithmetic for operand '{s}''", .{@tagName(zir_tag)}), }; - return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); + return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4997,11 +4984,17 @@ fn zirCmp( .eq, .neq => true, else => false, }; - const lhs_ty_tag = lhs.ty.zigTypeTag(); - const rhs_ty_tag = rhs.ty.zigTypeTag(); + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); + const lhs_ty_tag = lhs_ty.zigTypeTag(); + const rhs_ty_tag = rhs_ty.zigTypeTag(); if (is_equality_cmp and lhs_ty_tag == .Null and rhs_ty_tag == .Null) { // null == null, null != null - return mod.constBool(sema.arena, src, op == .eq); + if (op == .eq) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } else if (is_equality_cmp and ((lhs_ty_tag == .Null and rhs_ty_tag == .Optional) or rhs_ty_tag == .Null and lhs_ty_tag == .Optional)) @@ -5010,11 +5003,11 @@ fn zirCmp( const opt_operand = if (lhs_ty_tag == .Optional) lhs else rhs; return sema.analyzeIsNull(block, src, opt_operand, op == .neq); } else if (is_equality_cmp and - ((lhs_ty_tag == .Null and rhs.ty.isCPtr()) or (rhs_ty_tag == .Null and lhs.ty.isCPtr()))) + ((lhs_ty_tag == .Null and rhs_ty.isCPtr()) or (rhs_ty_tag == .Null and lhs_ty.isCPtr()))) { return mod.fail(&block.base, src, "TODO implement C pointer cmp", .{}); } else if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { - const non_null_type = if (lhs_ty_tag == .Null) rhs.ty else lhs.ty; + const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; return 
mod.fail(&block.base, src, "comparison of '{}' with null", .{non_null_type}); } else if (is_equality_cmp and ((lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) or @@ -5025,27 +5018,45 @@ fn zirCmp( if (!is_equality_cmp) { return mod.fail(&block.base, src, "{s} operator not allowed for errors", .{@tagName(op)}); } - if (rhs.value()) |rval| { - if (lhs.value()) |lval| { - // TODO optimisation oppurtunity: evaluate if std.mem.eql is faster with the names, or calling to Module.getErrorValue to get the values and then compare them is faster - return mod.constBool(sema.arena, src, std.mem.eql(u8, lval.castTag(.@"error").?.data.name, rval.castTag(.@"error").?.data.name) == (op == .eq)); + if (try sema.resolvePossiblyUndefinedValue(block, lhs_src, lhs)) |lval| { + if (try sema.resolvePossiblyUndefinedValue(block, rhs_src, rhs)) |rval| { + if (lval.isUndef() or rval.isUndef()) { + return sema.addConstUndef(Type.initTag(.bool)); + } + // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, + // or calling to Module.getErrorValue to get the values and then compare them is + // faster. + const lhs_name = lval.castTag(.@"error").?.data.name; + const rhs_name = rval.castTag(.@"error").?.data.name; + if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } } try sema.requireRuntimeBlock(block, src); - return block.addBinOp(src, Type.initTag(.bool), if (op == .eq) .cmp_eq else .cmp_neq, lhs, rhs); - } else if (lhs.ty.isNumeric() and rhs.ty.isNumeric()) { + const tag: Air.Inst.Tag = if (op == .eq) .cmp_eq else .cmp_neq; + return block.addBinOp(tag, lhs, rhs); + } else if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) { // This operation allows any combination of integer and float types, regardless of the // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for // numeric types. 
- return sema.cmpNumeric(block, src, lhs, rhs, op); + return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src); } else if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) { if (!is_equality_cmp) { return mod.fail(&block.base, src, "{s} operator not allowed for types", .{@tagName(op)}); } - return mod.constBool(sema.arena, src, lhs.value().?.eql(rhs.value().?) == (op == .eq)); + const lhs_as_type = try sema.resolveAirAsType(block, lhs_src, lhs); + const rhs_as_type = try sema.resolveAirAsType(block, rhs_src, rhs); + if (lhs_as_type.eql(rhs_as_type) == (op == .eq)) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } - const instructions = &[_]Air.Inst.Index{ lhs, rhs }; + const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); if (!resolved_type.isSelfComparable(is_equality_cmp)) { return mod.fail(&block.base, src, "operator not allowed for type '{}'", .{resolved_type}); @@ -5057,15 +5068,18 @@ fn zirCmp( if (casted_lhs.value()) |lhs_val| { if (casted_rhs.value()) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, resolved_type); + return sema.addConstUndef(resolved_type); + } + if (lhs_val.compare(op, rhs_val)) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; } - const result = lhs_val.compare(op, rhs_val); - return sema.mod.constBool(sema.arena, src, result); } } try sema.requireRuntimeBlock(block, src); - const tag: Inst.Tag = switch (op) { + const tag: Air.Inst.Tag = switch (op) { .lt => .cmp_lt, .lte => .cmp_lte, .eq => .cmp_eq, @@ -5073,28 +5087,26 @@ fn zirCmp( .gt => .cmp_gt, .neq => .cmp_neq, }; - const bool_type = Type.initTag(.bool); // TODO handle vectors - return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); + // TODO handle vectors + return block.addBinOp(tag, casted_lhs, casted_rhs); } fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); const target = sema.mod.getTarget(); const abi_size = operand_ty.abiSize(target); - return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size); + return sema.addIntUnsigned(Type.initTag(.comptime_int), abi_size); } fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); const target = sema.mod.getTarget(); const bit_size = operand_ty.bitSize(target); - return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), bit_size); + return sema.addIntUnsigned(Type.initTag(.comptime_int), bit_size); } fn zirThis( @@ -5171,18 +5183,16 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
_ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; - const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - return sema.mod.constType(sema.arena, src, operand.ty); + return sema.addType(operand.ty); } fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const operand_ptr = sema.resolveInst(inst_data.operand); const elem_ty = operand_ptr.ty.elemType(); - return sema.mod.constType(sema.arena, src, elem_ty); + return sema.addType(elem_ty); } fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -5217,7 +5227,7 @@ fn zirTypeofPeer( } const result_type = try sema.resolvePeerTypes(block, src, inst_list); - return sema.mod.constType(sema.arena, src, result_type); + return sema.addType(result_type); } fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -5231,17 +5241,21 @@ fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError const bool_type = Type.initTag(.bool); const operand = try sema.coerce(block, bool_type, uncasted_operand, uncasted_operand.src); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - return sema.mod.constBool(sema.arena, src, !val.toBool()); + if (val.toBool()) { + return Air.Inst.Ref.bool_false; + } else { + return Air.Inst.Ref.bool_true; + } } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, bool_type, .not, operand); + return block.addTyOp(.not, bool_type, operand); } fn zirBoolOp( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, - comptime is_bool_or: bool, + is_bool_or: bool, ) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5257,15 +5271,23 @@ fn zirBoolOp( if (lhs.value()) |lhs_val| { if (rhs.value()) 
|rhs_val| { if (is_bool_or) { - return sema.mod.constBool(sema.arena, src, lhs_val.toBool() or rhs_val.toBool()); + if (lhs_val.toBool() or rhs_val.toBool()) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } else { - return sema.mod.constBool(sema.arena, src, lhs_val.toBool() and rhs_val.toBool()); + if (lhs_val.toBool() and rhs_val.toBool()) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } } } try sema.requireRuntimeBlock(block, src); const tag: Air.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; - return block.addBinOp(src, bool_type, tag, lhs, rhs); + return block.addBinOp(tag, lhs, rhs); } fn zirBoolBr( @@ -5286,7 +5308,11 @@ fn zirBoolBr( if (try sema.resolveDefinedValue(parent_block, src, lhs)) |lhs_val| { if (lhs_val.toBool() == is_bool_or) { - return sema.mod.constBool(sema.arena, src, is_bool_or); + if (is_bool_or) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } // comptime-known left-hand side. No need for a block here; the result // is simply the rhs expression. 
Here we rely on there only being 1 @@ -5522,14 +5548,11 @@ fn analyzeRet( const fn_ty = func.owner_decl.ty; const fn_ret_ty = fn_ty.fnReturnType(); const casted_operand = try sema.coerce(block, fn_ret_ty, operand, src); - if (fn_ret_ty.zigTypeTag() == .Void) - _ = try block.addNoOp(src, Type.initTag(.noreturn), .retvoid) - else - _ = try block.addUnOp(src, Type.initTag(.noreturn), .ret, casted_operand); + _ = try block.addUnOp(.ret, casted_operand); return always_noreturn; } } - _ = try block.addUnOp(src, Type.initTag(.noreturn), .ret, operand); + _ = try block.addUnOp(.ret, operand); return always_noreturn; } @@ -5559,7 +5582,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne inst_data.is_volatile, inst_data.size, ); - return sema.mod.constType(sema.arena, .unneeded, ty); + return sema.addType(ty); } fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -5613,7 +5636,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError inst_data.flags.is_volatile, inst_data.size, ); - return sema.mod.constType(sema.arena, src, ty); + return sema.addType(ty); } fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -5794,7 +5817,7 @@ fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const struct_obj = struct_ty.castTag(.@"struct").?.data; const field = struct_obj.fields.get(field_name) orelse return sema.failWithBadFieldAccess(block, struct_obj, src, field_name); - return sema.mod.constType(sema.arena, src, field.ty); + return sema.addType(field.ty); } fn zirErrorReturnTrace( @@ -5937,7 +5960,7 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro .val = Value.initTag(.zero), }); if (!type_res.isAllowzeroPtr()) { - const is_non_zero = try block.addBinOp(src, Type.initTag(.bool), .cmp_neq, operand_coerced, zero); + const is_non_zero = try 
block.addBinOp(.cmp_neq, operand_coerced, zero); try sema.addSafetyCheck(block, is_non_zero, .cast_to_null); } @@ -5951,12 +5974,12 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro .ty = Type.initTag(.u64), .val = Value.initPayload(&val_payload.base), }); - const remainder = try block.addBinOp(src, Type.initTag(.u64), .bit_and, operand_coerced, align_minus_1); - const is_aligned = try block.addBinOp(src, Type.initTag(.bool), .cmp_eq, remainder, zero); + const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); + const is_aligned = try block.addBinOp(.cmp_eq, remainder, zero); try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment); } } - return block.addUnOp(src, type_res, .bitcast, operand_coerced); + return block.addTyOp(.bitcast, type_res, operand_coerced); } fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -6841,7 +6864,7 @@ fn coerce( return sema.coerceVarArgParam(block, inst, inst_src); } - const inst_ty = sema.getTypeOfAirRef(inst); + const inst_ty = sema.getTypeOf(inst); // If the types are the same, we can return the operand. 
if (dest_type.eql(inst_ty)) return inst; @@ -6950,7 +6973,7 @@ fn coerce( (src_info.signedness == .signed and dst_info.signedness == .unsigned and dst_info.bits > src_info.bits)) { try sema.requireRuntimeBlock(block, inst_src); - return block.addUnOp(inst_src, dest_type, .intcast, inst); + return block.addTyOp(.intcast, dest_type, inst); } } }, @@ -6963,7 +6986,7 @@ fn coerce( const dst_bits = dest_type.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src); - return block.addUnOp(inst_src, dest_type, .floatcast, inst); + return block.addTyOp(.floatcast, dest_type, inst); } } }, @@ -7062,7 +7085,7 @@ fn coerceVarArgParam( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const inst_ty = sema.getTypeOfAirRef(inst); + const inst_ty = sema.getTypeOf(inst); switch (inst_ty.zigTypeTag()) { .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst_src, "integer and float literals in var args function must be casted", .{}), else => {}, @@ -7121,7 +7144,7 @@ fn storePtr( // TODO handle if the element type requires comptime try sema.requireRuntimeBlock(block, src); - _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); + _ = try block.addBinOp(.store, ptr, value); } fn bitcast( @@ -7221,7 +7244,7 @@ fn analyzeRef( } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, ptr_type, .ref, operand); + return block.addTyOp(.ref, ptr_type, operand); } fn analyzeLoad( @@ -7231,7 +7254,7 @@ fn analyzeLoad( ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) InnerError!Air.Inst.Ref { - const ptr_ty = sema.getTypeOfAirRef(ptr); + const ptr_ty = sema.getTypeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag()) { .Pointer => ptr_ty.elemType(), else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr_ty}), @@ -7257,15 +7280,19 @@ fn analyzeIsNull( const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if 
(opt_val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, result_ty); + return sema.addConstUndef(result_ty); } const is_null = opt_val.isNull(); const bool_value = if (invert_logic) !is_null else is_null; - return sema.mod.constBool(sema.arena, src, bool_value); + if (bool_value) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } try sema.requireRuntimeBlock(block, src); - const inst_tag: Inst.Tag = if (invert_logic) .is_non_null else .is_null; - return block.addUnOp(src, result_ty, inst_tag, operand); + const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null; + return block.addUnOp(air_tag, operand); } fn analyzeIsNonErr( @@ -7275,18 +7302,22 @@ fn analyzeIsNonErr( operand: Air.Inst.Ref, ) InnerError!Air.Inst.Ref { const ot = operand.ty.zigTypeTag(); - if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); - if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); + if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; + if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |err_union| { if (err_union.isUndef()) { - return sema.mod.constUndef(sema.arena, src, result_ty); + return sema.addConstUndef(result_ty); + } + if (err_union.getError() == null) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; } - return sema.mod.constBool(sema.arena, src, err_union.getError() == null); } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, result_ty, .is_non_err, operand); + return block.addUnOp(.is_non_err, operand); } fn analyzeSlice( @@ -7372,31 +7403,43 @@ fn cmpNumeric( lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, op: std.math.CompareOperator, + lhs_src: LazySrcLoc, + rhs_src: LazySrcLoc, ) InnerError!Air.Inst.Ref { - assert(lhs.ty.isNumeric()); - 
assert(rhs.ty.isNumeric()); + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); + + assert(lhs_ty.isNumeric()); + assert(rhs_ty.isNumeric()); - const lhs_ty_tag = lhs.ty.zigTypeTag(); - const rhs_ty_tag = rhs.ty.zigTypeTag(); + const lhs_ty_tag = lhs_ty.zigTypeTag(); + const rhs_ty_tag = rhs_ty.zigTypeTag(); if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) { - if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) { + if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ - lhs.ty.arrayLen(), - rhs.ty.arrayLen(), + lhs_ty.arrayLen(), + rhs_ty.arrayLen(), }); } return sema.mod.fail(&block.base, src, "TODO implement support for vectors in cmpNumeric", .{}); } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) { return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{ - lhs.ty, - rhs.ty, + lhs_ty, + rhs_ty, }); } - if (lhs.value()) |lhs_val| { - if (rhs.value()) |rhs_val| { - return sema.mod.constBool(sema.arena, src, Value.compare(lhs_val, op, rhs_val)); + if (try sema.resolvePossiblyUndefinedValue(block, lhs_src, lhs)) |lhs_val| { + if (try sema.resolvePossiblyUndefinedValue(block, rhs_src, rhs)) |rhs_val| { + if (lhs_val.isUndef() or rhs_val.isUndef()) { + return sema.addConstUndef(Type.initTag(.bool)); + } + if (Value.compare(lhs_val, op, rhs_val)) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } } @@ -7422,19 +7465,19 @@ fn cmpNumeric( // Implicit cast the smaller one to the larger one. 
const dest_type = x: { if (lhs_ty_tag == .ComptimeFloat) { - break :x rhs.ty; + break :x rhs_ty; } else if (rhs_ty_tag == .ComptimeFloat) { - break :x lhs.ty; + break :x lhs_ty; } - if (lhs.ty.floatBits(target) >= rhs.ty.floatBits(target)) { - break :x lhs.ty; + if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) { + break :x lhs_ty; } else { - break :x rhs.ty; + break :x rhs_ty; } }; - const casted_lhs = try sema.coerce(block, dest_type, lhs, lhs.src); - const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs.src); - return block.addBinOp(src, dest_type, Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); + const casted_lhs = try sema.coerce(block, dest_type, lhs, lhs_src); + const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs_src); + return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } // For mixed unsigned integer sizes, implicit cast both operands to the larger integer. // For mixed signed and unsigned integers, implicit cast both operands to a signed @@ -7445,11 +7488,11 @@ fn cmpNumeric( const lhs_is_signed = if (lhs.value()) |lhs_val| lhs_val.compareWithZero(.lt) else - (lhs.ty.isFloat() or lhs.ty.isSignedInt()); + (lhs_ty.isFloat() or lhs_ty.isSignedInt()); const rhs_is_signed = if (rhs.value()) |rhs_val| rhs_val.compareWithZero(.lt) else - (rhs.ty.isFloat() or rhs.ty.isSignedInt()); + (rhs_ty.isFloat() or rhs_ty.isSignedInt()); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; @@ -7457,7 +7500,7 @@ fn cmpNumeric( var lhs_bits: usize = undefined; if (lhs.value()) |lhs_val| { if (lhs_val.isUndef()) - return sema.mod.constUndef(sema.arena, src, Type.initTag(.bool)); + return sema.addConstUndef(Type.initTag(.bool)); const is_unsigned = if (lhs_is_float) x: { var bigint_space: Value.BigIntSpace = undefined; var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(sema.gpa); @@ -7465,8 +7508,8 @@ fn cmpNumeric( const zcmp = lhs_val.orderAgainstZero(); if 
(lhs_val.floatHasFraction()) { switch (op) { - .eq => return sema.mod.constBool(sema.arena, src, false), - .neq => return sema.mod.constBool(sema.arena, src, true), + .eq => return Air.Inst.Ref.bool_false, + .neq => return Air.Inst.Ref.bool_true, else => {}, } if (zcmp == .lt) { @@ -7483,16 +7526,16 @@ fn cmpNumeric( }; lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed); } else if (lhs_is_float) { - dest_float_type = lhs.ty; + dest_float_type = lhs_ty; } else { - const int_info = lhs.ty.intInfo(target); + const int_info = lhs_ty.intInfo(target); lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } var rhs_bits: usize = undefined; if (rhs.value()) |rhs_val| { if (rhs_val.isUndef()) - return sema.mod.constUndef(sema.arena, src, Type.initTag(.bool)); + return sema.addConstUndef(Type.initTag(.bool)); const is_unsigned = if (rhs_is_float) x: { var bigint_space: Value.BigIntSpace = undefined; var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(sema.gpa); @@ -7500,8 +7543,8 @@ fn cmpNumeric( const zcmp = rhs_val.orderAgainstZero(); if (rhs_val.floatHasFraction()) { switch (op) { - .eq => return sema.mod.constBool(sema.arena, src, false), - .neq => return sema.mod.constBool(sema.arena, src, true), + .eq => return Air.Inst.Ref.bool_false, + .neq => return Air.Inst.Ref.bool_true, else => {}, } if (zcmp == .lt) { @@ -7518,9 +7561,9 @@ fn cmpNumeric( }; rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed); } else if (rhs_is_float) { - dest_float_type = rhs.ty; + dest_float_type = rhs_ty; } else { - const int_info = rhs.ty.intInfo(target); + const int_info = rhs_ty.intInfo(target); rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -7532,10 +7575,10 @@ fn cmpNumeric( const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; break :blk try Module.makeIntType(sema.arena, signedness, casted_bits); }; - const casted_lhs = try 
sema.coerce(block, dest_type, lhs, lhs.src); - const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs.src); + const casted_lhs = try sema.coerce(block, dest_type, lhs, lhs_src); + const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs_src); - return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); + return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index { @@ -7544,7 +7587,7 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Ins } try sema.requireRuntimeBlock(block, inst.src); - return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); + return block.addTyOp(.wrap_optional, dest_type, inst); } fn wrapErrorUnion( @@ -7617,19 +7660,24 @@ fn wrapErrorUnion( // we are coercing from E to E!T if (inst.ty.zigTypeTag() == .ErrorSet) { var coerced = try sema.coerce(block, err_union.data.error_set, inst, inst.src); - return block.addUnOp(inst.src, dest_type, .wrap_errunion_err, coerced); + return block.addTyOp(.wrap_errunion_err, dest_type, coerced); } else { var coerced = try sema.coerce(block, err_union.data.payload, inst, inst.src); - return block.addUnOp(inst.src, dest_type, .wrap_errunion_payload, coerced); + return block.addTyOp(.wrap_errunion_payload, dest_type, coerced); } } -fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []Air.Inst.Index) !Type { +fn resolvePeerTypes( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + instructions: []Air.Inst.Ref, +) !Type { if (instructions.len == 0) return Type.initTag(.noreturn); if (instructions.len == 1) - return instructions[0].ty; + return sema.getTypeOf(instructions[0]); const target = sema.mod.getTarget(); @@ -7989,7 +8037,7 @@ fn enumFieldSrcLoc( } /// Returns the type of the AIR instruction. 
-fn getTypeOfAirRef(sema: *Sema, air_ref: Air.Inst.Ref) Type { +fn getTypeOf(sema: *Sema, air_ref: Air.Inst.Ref) Type { switch (air_ref) { .none => unreachable, .u8_type => return Type.initTag(.u8), @@ -8045,21 +8093,13 @@ fn getTypeOfAirRef(sema: *Sema, air_ref: Air.Inst.Ref) Type { .fn_ccc_void_no_args_type => return Type.initTag(.fn_ccc_void_no_args), .single_const_pointer_to_comptime_int_type => return Type.initTag(.single_const_pointer_to_comptime_int), .const_slice_u8_type => return Type.initTag(.const_slice_u8), - else => return sema.getAirType(air_ref), - } -} - -/// Asserts the AIR instruction is a `const_ty` and returns the type. -fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { - var i: usize = @enumToInt(air_ref); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + else => {}, } - i -= Air.Inst.Ref.typed_value_map.len; + const air_index = @as(usize, @enumToInt(air_ref)) - Air.Inst.Ref.typed_value_map.len; const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); - assert(air_tags[i] == .const_ty); - return air_datas[i].ty; + assert(air_tags[air_index] == .const_ty); + return air_datas[air_index].ty; } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { @@ -8126,7 +8166,15 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } -pub fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { +fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) InnerError!Air.Inst.Ref { + return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); +} + +fn addConstUndef(sema: *Sema, ty: Type) InnerError!Air.Inst.Ref { + return sema.addConstant(ty, Value.initTag(.undef)); +} + +fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { const gpa = sema.gpa; const ty_inst = try sema.addType(ty); try sema.air_values.append(gpa, 
val); -- cgit v1.2.3 From 3c5927fb87034affd6af56ecd5d9ae07fe23d690 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Jul 2021 12:16:48 -0700 Subject: Sema: add a strategy for handling costly source locations Now you can pass `.unneeded` for a `LazySrcLoc` and if there ended up being a compile error that needed it, you'll get `error.NeededSourceLocation`. Callsites can now exploit this error to do the expensive computation to produce a source location object and then repeat the operation. --- src/Compilation.zig | 6 +- src/Module.zig | 32 +-- src/Sema.zig | 585 ++++++++++++++++++++++++++-------------------------- 3 files changed, 317 insertions(+), 306 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 4a442a8b67..f241ae6b10 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -148,7 +148,7 @@ emit_docs: ?EmitLoc, work_queue_wait_group: WaitGroup, astgen_wait_group: WaitGroup, -pub const InnerError = Module.InnerError; +pub const SemaError = Module.SemaError; pub const CRTFile = struct { lock: Cache.Lock, @@ -3170,7 +3170,7 @@ pub fn addCCArgs( try argv.appendSlice(comp.clang_argv); } -fn failCObj(comp: *Compilation, c_object: *CObject, comptime format: []const u8, args: anytype) InnerError { +fn failCObj(comp: *Compilation, c_object: *CObject, comptime format: []const u8, args: anytype) SemaError { @setCold(true); const err_msg = blk: { const msg = try std.fmt.allocPrint(comp.gpa, format, args); @@ -3191,7 +3191,7 @@ fn failCObjWithOwnedErrorMsg( comp: *Compilation, c_object: *CObject, err_msg: *CObject.ErrorMsg, -) InnerError { +) SemaError { @setCold(true); { const lock = comp.mutex.acquire(); diff --git a/src/Module.zig b/src/Module.zig index 3ce3c47f14..0a082313b3 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1996,7 +1996,8 @@ pub const LazySrcLoc = union(enum) { } }; -pub const InnerError = error{ OutOfMemory, AnalysisFail }; +pub const SemaError = error{ OutOfMemory, AnalysisFail }; 
+pub const CompileError = error{ OutOfMemory, AnalysisFail, NeededSourceLocation }; pub fn deinit(mod: *Module) void { const gpa = mod.gpa; @@ -2635,7 +2636,7 @@ pub fn mapOldZirToNew( } } -pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void { +pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2735,7 +2736,7 @@ pub fn semaPkg(mod: *Module, pkg: *Package) !void { /// Regardless of the file status, will create a `Decl` so that we /// can track dependencies and re-analyze when the file becomes outdated. -pub fn semaFile(mod: *Module, file: *Scope.File) InnerError!void { +pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -3150,7 +3151,7 @@ pub fn scanNamespace( extra_start: usize, decls_len: u32, parent_decl: *Decl, -) InnerError!usize { +) SemaError!usize { const tracy = trace(@src()); defer tracy.end(); @@ -3197,7 +3198,7 @@ const ScanDeclIter = struct { unnamed_test_index: usize = 0, }; -fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) InnerError!void { +fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -3451,7 +3452,7 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { mod.gpa.free(kv.value); } -pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { +pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); @@ -3804,7 +3805,7 @@ pub fn fail( src: LazySrcLoc, comptime format: []const u8, args: anytype, -) InnerError { +) CompileError { const err_msg = try mod.errMsg(scope, src, format, args); return mod.failWithOwnedErrorMsg(scope, err_msg); } @@ -3817,7 +3818,7 @@ pub fn failTok( token_index: ast.TokenIndex, comptime format: []const u8, args: anytype, -) InnerError { +) CompileError { const src = 
scope.srcDecl().?.tokSrcLoc(token_index); return mod.fail(scope, src, format, args); } @@ -3830,18 +3831,21 @@ pub fn failNode( node_index: ast.Node.Index, comptime format: []const u8, args: anytype, -) InnerError { +) CompileError { const src = scope.srcDecl().?.nodeSrcLoc(node_index); return mod.fail(scope, src, format, args); } -pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) InnerError { +pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) CompileError { @setCold(true); { errdefer err_msg.destroy(mod.gpa); - try mod.failed_decls.ensureCapacity(mod.gpa, mod.failed_decls.count() + 1); - try mod.failed_files.ensureCapacity(mod.gpa, mod.failed_files.count() + 1); + if (err_msg.src_loc.lazy == .unneeded) { + return error.NeededSourceLocation; + } + try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); + try mod.failed_files.ensureUnusedCapacity(mod.gpa, 1); } switch (scope.tag) { .block => { @@ -4340,7 +4344,7 @@ pub const SwitchProngSrc = union(enum) { } }; -pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) InnerError!void { +pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -4490,7 +4494,7 @@ pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) InnerError!void { } } -pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) InnerError!void { +pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) CompileError!void { const tracy = trace(@src()); defer tracy.end(); diff --git a/src/Sema.zig b/src/Sema.zig index 829dd843cc..91f81ffeed 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -61,7 +61,8 @@ const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); const trace = @import("tracy.zig").trace; const Scope = Module.Scope; -const InnerError = Module.InnerError; +const CompileError = Module.CompileError; +const SemaError = Module.SemaError; const Decl = Module.Decl; const LazySrcLoc = 
Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); @@ -83,7 +84,7 @@ pub fn analyzeFnBody( sema: *Sema, block: *Scope.Block, fn_body_inst: Zir.Inst.Index, -) InnerError!void { +) SemaError!void { const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); const body: []const Zir.Inst.Index = switch (tags[fn_body_inst]) { @@ -109,13 +110,16 @@ pub fn analyzeFnBody( }, else => unreachable, }; - _ = try sema.analyzeBody(block, body); + _ = sema.analyzeBody(block, body) catch |err| switch (err) { + error.NeededSourceLocation => unreachable, + else => |e| return e, + }; } /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. -fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) CompileError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -125,7 +129,7 @@ fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) I /// return type of `analyzeBody` so that we can tail call them. /// Only appropriate to return when the instruction is known to be NoReturn /// solely based on the ZIR tag. 
-const always_noreturn: InnerError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined); +const always_noreturn: CompileError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined); /// This function is the main loop of `Sema` and it can be used in two different ways: /// * The traditional way where there are N breaks out of the block and peer type @@ -140,7 +144,7 @@ pub fn analyzeBody( sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { // No tracy calls here, to avoid interfering with the tail call mechanism. const map = &block.sema.inst_map; @@ -541,7 +545,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -638,7 +642,7 @@ fn resolveConstValue( block: *Scope.Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, -) !Value { +) CompileError!Value { return (try sema.resolveDefinedValue(block, src, air_ref)) orelse return sema.failWithNeededComptime(block, src); } @@ -648,7 +652,7 @@ fn resolveDefinedValue( block: *Scope.Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, -) !?Value { +) CompileError!?Value { if (try sema.resolvePossiblyUndefinedValue(block, src, air_ref)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); @@ -663,7 +667,7 @@ fn resolvePossiblyUndefinedValue( block: *Scope.Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, -) !?Value { +) CompileError!?Value { const ty = sema.getTypeOf(air_ref); if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; @@ -687,11 +691,11 @@ fn resolvePossiblyUndefinedValue( } } -fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { +fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) 
CompileError { return sema.mod.fail(&block.base, src, "unable to resolve comptime value", .{}); } -fn failWithUseOfUndef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { +fn failWithUseOfUndef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) CompileError { return sema.mod.fail(&block.base, src, "use of undefined value here causes undefined behavior", .{}); } @@ -733,7 +737,7 @@ pub fn resolveInstConst( block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, -) InnerError!TypedValue { +) CompileError!TypedValue { const air_ref = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ @@ -742,13 +746,13 @@ pub fn resolveInstConst( }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -760,7 +764,7 @@ pub fn analyzeStructDecl( new_decl: *Decl, inst: Zir.Inst.Index, struct_obj: *Module.Struct, -) InnerError!void { +) SemaError!void { const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -783,7 +787,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if 
(small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -854,7 +858,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1051,7 +1055,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1115,7 +1119,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1135,7 +1139,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1175,7 +1179,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1187,7 +1191,7 @@ fn zirRetPtr( return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1200,7 +1204,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1211,7 +1215,7 @@ fn zirRetType( return sema.addType(ret_type); } -fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1227,14 +1231,14 @@ fn ensureResultUsed( block: *Scope.Block, operand: Air.Inst.Ref, src: LazySrcLoc, -) InnerError!void { +) CompileError!void { switch (operand.ty.zigTypeTag()) { .Void, .NoReturn => return, else => return sema.mod.fail(&block.base, src, "expression value is ignored", .{}), } } -fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1247,7 +1251,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1281,7 +1285,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1304,13 +1308,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1333,13 +1337,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1352,7 +1356,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1371,7 +1375,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1395,7 +1399,7 @@ fn zirAllocInferred( return result; } -fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1421,7 +1425,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Inde ptr.tag = .alloc; } -fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1494,7 +1498,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind } } -fn zirValidateArrayInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirValidateArrayInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement Sema.zirValidateArrayInitPtr", .{}); @@ -1506,7 +1510,7 @@ fn failWithBadFieldAccess( struct_obj: *Module.Struct, field_src: LazySrcLoc, field_name: []const u8, -) InnerError { +) CompileError { const mod = sema.mod; const gpa = sema.gpa; @@ -1533,7 +1537,7 @@ fn failWithBadUnionFieldAccess( union_obj: *Module.Union, field_src: LazySrcLoc, field_name: []const u8, -) InnerError { +) CompileError { const mod = sema.mod; const gpa = sema.gpa; @@ -1554,7 +1558,7 @@ fn failWithBadUnionFieldAccess( return mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1575,7 +1579,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.storePtr(block, src, bitcasted_ptr, value); } -fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1594,7 +1598,7 @@ fn 
zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.storePtr(block, src, bitcasted_ptr, value); } -fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const quota = try sema.resolveAlreadyCoercedInt(block, src, inst_data.operand, u32); @@ -1602,7 +1606,7 @@ fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) sema.branch_quota = quota; } -fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1612,7 +1616,7 @@ fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!v return sema.storePtr(block, sema.src, ptr, value); } -fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1624,7 +1628,7 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1660,7 +1664,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.addType(param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const 
tracy = trace(@src()); defer tracy.end(); @@ -1688,7 +1692,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1697,7 +1701,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.addIntUnsigned(Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1715,7 +1719,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! }); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1728,7 +1732,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1742,7 +1746,7 @@ fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro }); } -fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1757,7 +1761,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1789,7 +1793,7 @@ fn zirCompileLog( }); } -fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1799,7 +1803,7 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return always_noreturn; } -fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); const msg_inst = sema.resolveInst(inst_data.operand); @@ -1807,7 +1811,7 @@ fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Z return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1872,7 +1876,7 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = 
trace(@src()); defer tracy.end(); @@ -1882,13 +1886,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1946,7 +1950,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1957,7 +1961,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2033,7 +2037,7 @@ fn analyzeBlockBody( return &merges.block_inst.base; } -fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2069,13 +2073,13 @@ fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
try sema.mod.analyzeExport(&block.base, src, export_name, decl); } -fn zirSetAlignStack(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetAlignStack(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirSetAlignStack", .{}); } -fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand); @@ -2083,19 +2087,19 @@ fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError func.is_cold = is_cold; } -fn zirSetFloatMode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetFloatMode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirSetFloatMode", .{}); } -fn zirSetRuntimeSafety(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetRuntimeSafety(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand); } -fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2105,13 +2109,13 @@ fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint); } -fn zirFence(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirFence(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirFence", .{}); } -fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2151,7 +2155,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerE } } -fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2165,7 +2169,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2173,7 +2177,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn 
zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2199,7 +2203,7 @@ fn lookupInNamespace( sema: *Sema, namespace: *Scope.Namespace, ident_name: []const u8, -) InnerError!?*Decl { +) CompileError!?*Decl { const namespace_decl = namespace.getDecl(); if (namespace_decl.analysis == .file_failure) { try sema.mod.declareDeclDependency(sema.owner_decl, namespace_decl); @@ -2227,7 +2231,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2257,7 +2261,7 @@ fn analyzeCall( modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, args: []const Air.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2412,7 +2416,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2423,7 +2427,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.addType(ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2435,7 +2439,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.addType(opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2443,7 +2447,7 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.addType(elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -2457,7 +2461,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.addType(vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2470,7 +2474,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.addType(array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2485,7 +2489,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.addType(array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2497,7 +2501,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.addType(anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2517,7 +2521,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.addType(err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2536,7 +2540,7 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2566,7 +2570,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addTyOp(.bitcast, result_ty, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2599,7 +2603,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addTyOp(.bitcast, Type.initTag(.anyerror), op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2689,7 +2693,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2703,7 +2707,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; @@ -2741,7 +2745,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr }); } - if (enum_tag.value()) |enum_tag_val| { + if (try sema.resolvePossiblyUndefinedValue(block, operand_src, enum_tag)) |enum_tag_val| { if (enum_tag_val.castTag(.enum_field_index)) |enum_field_payload| { const field_index = enum_field_payload.data; switch (enum_tag.ty.tag()) { @@ -2785,7 +2789,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addTyOp(.bitcast, int_tag_ty, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2801,16 +2805,16 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return mod.fail(&block.base, dest_ty_src, "expected enum, found {}", .{dest_ty}); } - if (dest_ty.isNonexhaustiveEnum()) { - if (operand.value()) |int_val| { + if (try 
sema.resolvePossiblyUndefinedValue(block, operand_src, operand)) |int_val| { + if (dest_ty.isNonexhaustiveEnum()) { return mod.constInst(arena, src, .{ .ty = dest_ty, .val = int_val, }); } - } - - if (try sema.resolveDefinedValue(block, operand_src, operand)) |int_val| { + if (int_val.isUndef()) { + return sema.failWithUseOfUndef(block, operand_src); + } if (!dest_ty.enumHasInt(int_val, target)) { const msg = msg: { const msg = try mod.errMsg( @@ -2846,7 +2850,7 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2863,7 +2867,7 @@ fn zirOptionalPayloadPtr( const child_type = try opt_type.optionalChildAlloc(sema.arena); const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); - if (optional_ptr.value()) |pointer_val| { + if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); if (val.isNull()) { return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); @@ -2889,7 +2893,7 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2903,7 +2907,7 @@ fn zirOptionalPayload( const child_type = try opt_type.optionalChildAlloc(sema.arena); - if (operand.value()) |val| { + if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.isNull()) { return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); } @@ -2927,7 +2931,7 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2937,7 +2941,7 @@ fn zirErrUnionPayload( if (operand.ty.zigTypeTag() != .ErrorUnion) return 
sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); - if (operand.value()) |val| { + if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.getError()) |name| { return sema.mod.fail(&block.base, src, "caught unexpected error '{s}'", .{name}); } @@ -2962,7 +2966,7 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2976,7 +2980,7 @@ fn zirErrUnionPayloadPtr( const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); - if (operand.value()) |pointer_val| { + if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); if (val.getError()) |name| { return sema.mod.fail(&block.base, src, "caught unexpected error '{s}'", .{name}); @@ -3001,7 +3005,7 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3013,7 +3017,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner const result_ty = operand.ty.castTag(.error_union).?.data.error_set; - if (operand.value()) |val| { + if (try sema.resolveDefinedValue(block, src, operand)) |val| { assert(val.getError() != null); const data = val.castTag(.error_union).?.data; return sema.mod.constInst(sema.arena, src, .{ @@ -3027,7 +3031,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrUnionCodePtr(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3041,7 +3045,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In const result_ty = operand.ty.elemType().castTag(.error_union).?.data.error_set; - if (operand.value()) |pointer_val| { + if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); assert(val.getError() != null); const data = val.castTag(.error_union).?.data; @@ -3055,7 +3059,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand); } -fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -3074,7 +3078,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3125,7 +3129,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3266,7 +3270,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3274,7 +3278,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air. 
return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3290,13 +3294,13 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const dest_type = try sema.resolveType(block, src, zir_dest_type); const operand = sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3312,7 +3316,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(.ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3330,7 +3334,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3343,7 +3347,7 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn 
zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3358,7 +3362,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3371,7 +3375,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3414,7 +3418,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3428,7 +3432,7 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.bitcast(block, dest_type, operand, operand_src); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3471,7 +3475,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO 
implement analyze widen or shorten float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3486,7 +3490,7 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3504,7 +3508,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3514,7 +3518,7 @@ fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3527,7 +3531,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3540,7 +3544,7 @@ fn 
zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3554,7 +3558,7 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3576,7 +3580,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3595,7 +3599,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3614,7 +3618,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3647,7 +3651,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3684,7 +3688,7 @@ fn analyzeSwitch( multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const mod = sema.mod; @@ -4350,20 +4354,23 @@ fn resolveSwitchItemVal( switch_node_offset: i32, 
switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, -) InnerError!TypedValue { +) CompileError!TypedValue { const item = sema.resolveInst(item_ref); - // We have to avoid the other helper functions here because we cannot construct a LazySrcLoc - // because we only have the switch AST node. Only if we know for sure we need to report - // a compile error do we resolve the full source locations. - if (item.value()) |val| { - if (val.isUndef()) { - const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); - return sema.failWithUseOfUndef(block, src); - } + // Constructing a LazySrcLoc is costly because we only have the switch AST node. + // Only if we know for sure we need to report a compile error do we resolve the + // full source locations. + if (sema.resolveConstValue(block, .unneeded, item)) |val| { return TypedValue{ .ty = item.ty, .val = val }; + } else |err| switch (err) { + error.NeededSourceLocation => { + const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); + return TypedValue{ + .ty = item.ty, + .val = try sema.resolveConstValue(block, src, item), + }; + }, + else => |e| return e, } - const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); - return sema.failWithNeededComptime(block, src); } fn validateSwitchRange( @@ -4374,7 +4381,7 @@ fn validateSwitchRange( last_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; const maybe_prev_src = try range_set.add(first_val, last_val, switch_prong_src); @@ -4388,7 +4395,7 @@ fn validateSwitchItem( item_ref: Zir.Inst.Ref, src_node_offset: i32, 
switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; const maybe_prev_src = try range_set.add(item_val, item_val, switch_prong_src); return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); @@ -4401,7 +4408,7 @@ fn validateSwitchItemEnum( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const mod = sema.mod; const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val) orelse { @@ -4435,7 +4442,7 @@ fn validateSwitchDupe( maybe_prev_src: ?Module.SwitchProngSrc, switch_prong_src: Module.SwitchProngSrc, src_node_offset: i32, -) InnerError!void { +) CompileError!void { const prev_prong_src = maybe_prev_src orelse return; const mod = sema.mod; const gpa = sema.gpa; @@ -4469,7 +4476,7 @@ fn validateSwitchItemBool( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; if (item_val.toBool()) { true_count.* += 1; @@ -4491,7 +4498,7 @@ fn validateSwitchItemSparse( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return; return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset); @@ -4503,7 +4510,7 @@ fn validateSwitchNoRange( ranges_len: u32, operand_ty: Type, src_node_offset: i32, -) InnerError!void { +) CompileError!void { if (ranges_len == 
0) return; @@ -4530,7 +4537,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4539,7 +4546,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -4562,7 +4569,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return Air.Inst.Ref.bool_false; } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4587,13 +4594,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.addType(file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4602,7 +4609,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4615,7 +4622,7 @@ fn zirBitwise( block: *Scope.Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4675,7 +4682,7 @@ fn zirBitwise( return block.addBinOp(air_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4683,7 +4690,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4691,7 +4698,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4704,7 +4711,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4718,7 +4725,7 @@ fn zirNegate( return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4738,7 +4745,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4757,7 +4764,7 @@ fn analyzeArithmetic( src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -4867,7 +4874,7 @@ fn analyzeArithmetic( return 
block.addBinOp(air_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4882,7 +4889,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4966,7 +4973,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5091,7 +5098,7 @@ fn zirCmp( return block.addBinOp(tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); @@ -5100,7 +5107,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.addIntUnsigned(Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); @@ -5113,7 +5120,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5122,7 +5129,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{}); } @@ -5131,12 +5138,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5179,7 +5186,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; @@ -5187,7 +5194,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.addType(operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_ptr = sema.resolveInst(inst_data.operand); @@ -5195,13 +5202,13 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.addType(elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5211,7 +5218,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5230,7 +5237,7 @@ fn zirTypeofPeer( return sema.addType(result_type); } -fn zirBoolNot(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5256,7 +5263,7 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5295,7 +5302,7 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5369,7 +5376,7 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5383,7 +5390,7 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5394,7 +5401,7 @@ fn zirIsNonNullPtr( return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5403,7 +5410,7 @@ fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5418,7 +5425,7 @@ fn zirCondbr( sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { const tracy = trace(@src()); 
defer tracy.end(); @@ -5461,7 +5468,7 @@ fn zirCondbr( return always_noreturn; } -fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5482,7 +5489,7 @@ fn zirRetErrValue( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const err_name = inst_data.get(sema.code); const src = inst_data.src(); @@ -5507,7 +5514,7 @@ fn zirRetCoerce( block: *Scope.Block, inst: Zir.Inst.Index, need_coercion: bool, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5518,7 +5525,7 @@ fn zirRetCoerce( return sema.analyzeRet(block, operand, src, need_coercion); } -fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5535,7 +5542,7 @@ fn analyzeRet( operand: Air.Inst.Ref, src: LazySrcLoc, need_coercion: bool, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { if (block.inlining) |inlining| { // We are inlining a function call; rewrite the `ret` as a `break`. 
try inlining.merges.results.append(sema.gpa, operand); @@ -5564,7 +5571,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5585,7 +5592,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.addType(ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5639,7 +5646,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.addType(ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5653,13 +5660,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5772,7 +5779,7 @@ fn 
zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5780,7 +5787,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5788,7 +5795,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5796,13 +5803,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const 
src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5824,7 +5831,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5833,7 +5840,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5842,84 +5849,84 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return 
sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5982,199 +5989,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addTyOp(.bitcast, type_res, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirClz(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return 
sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn 
zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", 
.{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); } -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn 
zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6185,7 +6192,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6197,7 +6204,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6263,7 +6270,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -6330,7 +6337,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6340,7 +6347,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) 
CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6350,7 +6357,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6360,7 +6367,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{}); @@ -6370,7 +6377,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6380,7 +6387,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6556,7 +6563,7 @@ fn namedFieldPtr( object_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = 
sema.mod; const arena = sema.arena; @@ -6706,7 +6713,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, decl_name: []const u8, -) InnerError!?Air.Inst.Ref { +) CompileError!?Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6734,7 +6741,7 @@ fn analyzeStructFieldPtr( field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6769,7 +6776,7 @@ fn analyzeUnionFieldPtr( field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6805,7 +6812,7 @@ fn elemPtr( array_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6832,7 +6839,7 @@ fn elemPtrArray( array_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (array_ptr.value()) |array_ptr_val| { if (elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. 
@@ -6859,7 +6866,7 @@ fn coerce( dest_type: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (dest_type.tag() == .var_args_param) { return sema.coerceVarArgParam(block, inst, inst_src); } @@ -7041,7 +7048,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!?Air.Inst.Index { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7153,7 +7160,7 @@ fn bitcast( dest_type: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { // Keep the comptime Value representation; take the new type. return sema.addConstant(dest_type, val); @@ -7163,7 +7170,7 @@ fn bitcast( return block.addTyOp(.bitcast, dest_type, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!Air.Inst.Ref { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7179,12 +7186,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) CompileError!Air.Inst.Ref { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) CompileError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7205,7 +7212,7 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl ); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Ref { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) CompileError!Air.Inst.Ref { const variable = tv.val.castTag(.variable).?.data; const ty = try Module.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); @@ -7233,7 +7240,7 @@ fn analyzeRef( block: *Scope.Block, src: LazySrcLoc, operand: Air.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7253,7 +7260,7 @@ fn analyzeLoad( src: LazySrcLoc, ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ptr_ty = sema.getTypeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag()) 
{ .Pointer => ptr_ty.elemType(), @@ -7276,7 +7283,7 @@ fn analyzeIsNull( src: LazySrcLoc, operand: Air.Inst.Ref, invert_logic: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7300,7 +7307,7 @@ fn analyzeIsNonErr( block: *Scope.Block, src: LazySrcLoc, operand: Air.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; @@ -7329,7 +7336,7 @@ fn analyzeSlice( end_opt: ?Air.Inst.Index, sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7405,7 +7412,7 @@ fn cmpNumeric( op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const lhs_ty = sema.getTypeOf(lhs); const rhs_ty = sema.getTypeOf(rhs); @@ -7746,7 +7753,7 @@ fn resolvePeerTypes( return chosen.ty; } -fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) InnerError!Type { +fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) CompileError!Type { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -7798,7 +7805,7 @@ fn resolveBuiltinTypeFields( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Type { +) CompileError!Type { const resolved_ty = try sema.getBuiltinType(block, src, name); return sema.resolveTypeFields(block, src, resolved_ty); } @@ -7808,7 +7815,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) 
InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; @@ -7834,7 +7841,7 @@ fn getBuiltinType( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Type { +) CompileError!Type { const ty_inst = try sema.getBuiltin(block, src, name); return sema.resolveAirAsType(block, src, ty_inst); } @@ -7848,7 +7855,7 @@ fn typeHasOnePossibleValue( block: *Scope.Block, src: LazySrcLoc, starting_type: Type, -) InnerError!?Value { +) CompileError!?Value { var ty = starting_type; while (true) switch (ty.tag()) { .f16, @@ -7986,7 +7993,7 @@ fn typeHasOnePossibleValue( }; } -fn getAstTree(sema: *Sema, block: *Scope.Block) InnerError!*const std.zig.ast.Tree { +fn getAstTree(sema: *Sema, block: *Scope.Block) CompileError!*const std.zig.ast.Tree { return block.src_decl.namespace.file_scope.getTree(sema.gpa) catch |err| { log.err("unable to load AST to report compile error: {s}", .{@errorName(err)}); return error.AnalysisFail; @@ -8166,15 +8173,15 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } -fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) InnerError!Air.Inst.Ref { +fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); } -fn addConstUndef(sema: *Sema, ty: Type) InnerError!Air.Inst.Ref { +fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { return sema.addConstant(ty, Value.initTag(.undef)); } -fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { +fn addConstant(sema: *Sema, ty: Type, val: Value) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const ty_inst = try sema.addType(ty); try sema.air_values.append(gpa, val); -- cgit v1.2.3 From 27be4f31402557972ae28d552f4ec4617357d454 Mon Sep 17 00:00:00 2001 From: Andrew 
Kelley Date: Wed, 14 Jul 2021 19:04:02 -0700 Subject: Sema: more AIR memory layout reworking progress Additionally: ZIR encoding for floats now supports float literals up to f64, not only f32. This is because we no longer need a source location for this instruction. --- src/Air.zig | 11 +- src/AstGen.zig | 13 +- src/Module.zig | 32 ++ src/Sema.zig | 928 +++++++++++++++++++++++++++------------------------------ src/Zir.zig | 19 +- 5 files changed, 486 insertions(+), 517 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Air.zig b/src/Air.zig index 1f294c43f3..e2eeae1130 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -94,6 +94,11 @@ pub const Inst = struct { bitcast, /// Uses the `ty_pl` field with payload `Block`. block, + /// A labeled block of code that loops forever. At the end of the body it is implied + /// to repeat; no explicit "repeat" instruction terminates loop bodies. + /// Result type is always noreturn; no instructions in a block follow this one. + /// Uses the `ty_pl` field. Payload is `Block`. + loop, /// Return from a block with a result. /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `br` field. @@ -181,11 +186,6 @@ pub const Inst = struct { /// Read a value from a pointer. /// Uses the `ty_op` field. load, - /// A labeled block of code that loops forever. At the end of the body it is implied - /// to repeat; no explicit "repeat" instruction terminates loop bodies. - /// Result type is always noreturn; no instructions in a block follow this one. - /// Uses the `ty_pl` field. Payload is `Block`. - loop, /// Converts a pointer to its address. Result type is always `usize`. /// Uses the `un_op` field. ptrtoint, @@ -279,6 +279,7 @@ pub const Inst = struct { /// this union. `Tag` determines which union field is active, as well as /// how to interpret the data within. 
pub const Data = union { + no_op: void, un_op: Ref, bin_op: struct { lhs: Ref, diff --git a/src/AstGen.zig b/src/AstGen.zig index a8510365a9..1b58b3f2f7 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -6589,12 +6589,12 @@ fn floatLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir } else std.fmt.parseFloat(f128, bytes) catch |err| switch (err) { error.InvalidCharacter => unreachable, // validated by tokenizer }; - // If the value fits into a f32 without losing any precision, store it that way. + // If the value fits into a f64 without losing any precision, store it that way. @setFloatMode(.Strict); - const smaller_float = @floatCast(f32, float_number); + const smaller_float = @floatCast(f64, float_number); const bigger_again: f128 = smaller_float; if (bigger_again == float_number) { - const result = try gz.addFloat(smaller_float, node); + const result = try gz.addFloat(smaller_float); return rvalue(gz, rl, result, node); } // We need to use 128 bits. Break the float into 4 u32 values so we can @@ -9145,13 +9145,10 @@ const GenZir = struct { return indexToRef(new_index); } - fn addFloat(gz: *GenZir, number: f32, src_node: ast.Node.Index) !Zir.Inst.Ref { + fn addFloat(gz: *GenZir, number: f64) !Zir.Inst.Ref { return gz.add(.{ .tag = .float, - .data = .{ .float = .{ - .src_node = gz.nodeIndexToRelative(src_node), - .number = number, - } }, + .data = .{ .float = number }, }); } diff --git a/src/Module.zig b/src/Module.zig index 0a082313b3..4bd48dad05 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1226,6 +1226,17 @@ pub const Scope = struct { return block.src_decl.namespace.file_scope; } + pub fn addTy( + block: *Block, + tag: Air.Inst.Tag, + ty: Type, + ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .{ .ty = ty }, + }); + } + pub fn addTyOp( block: *Block, tag: Air.Inst.Tag, @@ -1241,6 +1252,13 @@ pub const Scope = struct { }); } + pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) 
error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .no_op, + }); + } + pub fn addUnOp( block: *Block, tag: Air.Inst.Tag, @@ -1252,6 +1270,20 @@ pub const Scope = struct { }); } + pub fn addBr( + block: *Block, + target_block: Air.Inst.Index, + operand: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = target_block, + .operand = operand, + } }, + }); + } + pub fn addBinOp( block: *Block, tag: Air.Inst.Tag, diff --git a/src/Sema.zig b/src/Sema.zig index 31d3c9551d..48ad8d97fc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -372,14 +372,14 @@ pub fn analyzeBody( //.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), //.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), - //.add => try sema.zirArithmetic(block, inst), - //.addwrap => try sema.zirArithmetic(block, inst), - //.div => try sema.zirArithmetic(block, inst), - //.mod_rem => try sema.zirArithmetic(block, inst), - //.mul => try sema.zirArithmetic(block, inst), - //.mulwrap => try sema.zirArithmetic(block, inst), - //.sub => try sema.zirArithmetic(block, inst), - //.subwrap => try sema.zirArithmetic(block, inst), + .add => try sema.zirArithmetic(block, inst), + .addwrap => try sema.zirArithmetic(block, inst), + .div => try sema.zirArithmetic(block, inst), + .mod_rem => try sema.zirArithmetic(block, inst), + .mul => try sema.zirArithmetic(block, inst), + .mulwrap => try sema.zirArithmetic(block, inst), + .sub => try sema.zirArithmetic(block, inst), + .subwrap => try sema.zirArithmetic(block, inst), //// Instructions that we know to *always* be noreturn based solely on their tag. //// These functions match the return type of analyzeBody so that we can @@ -505,35 +505,35 @@ pub fn analyzeBody( i = 0; continue; }, - //.block_inline => blk: { - // // Directly analyze the block body without introducing a new block. 
- // const inst_data = datas[inst].pl_node; - // const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); - // const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; - // const break_inst = try sema.analyzeBody(block, inline_body); - // const break_data = datas[break_inst].@"break"; - // if (inst == break_data.block_inst) { - // break :blk sema.resolveInst(break_data.operand); - // } else { - // return break_inst; - // } - //}, - //.condbr_inline => blk: { - // const inst_data = datas[inst].pl_node; - // const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; - // const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); - // const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; - // const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - // const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); - // const inline_body = if (cond.val.toBool()) then_body else else_body; - // const break_inst = try sema.analyzeBody(block, inline_body); - // const break_data = datas[break_inst].@"break"; - // if (inst == break_data.block_inst) { - // break :blk sema.resolveInst(break_data.operand); - // } else { - // return break_inst; - // } - //}, + .block_inline => blk: { + // Directly analyze the block body without introducing a new block. 
+ const inst_data = datas[inst].pl_node; + const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); + const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + const break_inst = try sema.analyzeBody(block, inline_body); + const break_data = datas[break_inst].@"break"; + if (inst == break_data.block_inst) { + break :blk sema.resolveInst(break_data.operand); + } else { + return break_inst; + } + }, + .condbr_inline => blk: { + const inst_data = datas[inst].pl_node; + const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; + const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); + const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); + const inline_body = if (cond.val.toBool()) then_body else else_body; + const break_inst = try sema.analyzeBody(block, inline_body); + const break_data = datas[break_inst].@"break"; + if (inst == break_data.block_inst) { + break :blk sema.resolveInst(break_data.operand); + } else { + return break_inst; + } + }, else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), }; if (sema.getTypeOf(air_inst).isNoReturn()) @@ -1186,7 +1186,7 @@ fn zirRetPtr( const fn_ty = sema.func.?.owner_decl.ty; const ret_type = fn_ty.fnReturnType(); const ptr_type = try Module.simplePtrType(sema.arena, ret_type, true, .One); - return block.addNoOp(src, ptr_type, .alloc); + return block.addTy(.alloc, ptr_type); } fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -1230,7 +1230,8 @@ fn ensureResultUsed( operand: Air.Inst.Ref, src: LazySrcLoc, ) CompileError!void { - switch (operand.ty.zigTypeTag()) { + const operand_ty = sema.getTypeOf(operand); + switch (operand_ty.zigTypeTag()) { .Void, 
.NoReturn => return, else => return sema.mod.fail(&block.base, src, "expression value is ignored", .{}), } @@ -1243,7 +1244,8 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); - switch (operand.ty.zigTypeTag()) { + const operand_ty = sema.getTypeOf(operand); + switch (operand_ty.zigTypeTag()) { .ErrorSet, .ErrorUnion => return sema.mod.fail(&block.base, src, "error is discarded", .{}), else => return, } @@ -1257,7 +1259,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co const src = inst_data.src(); const array_ptr = sema.resolveInst(inst_data.operand); - const elem_ty = array_ptr.ty.elemType(); + const elem_ty = sema.getTypeOf(array_ptr).elemType(); if (!elem_ty.isIndexable()) { const cond_src: LazySrcLoc = .{ .node_offset_for_cond = inst_data.src_node }; const msg = msg: { @@ -1317,7 +1319,6 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); @@ -1329,10 +1330,7 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp .val = undefined, // astgen guarantees there will be a store before the first load }, }; - return sema.mod.constInst(sema.arena, src, .{ - .ty = ptr_type, - .val = Value.initPayload(&val_payload.base), - }); + return sema.addConstant(ptr_type, Value.initPayload(&val_payload.base)); } fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -1351,7 +1349,7 @@ fn zirAlloc(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError const var_type = try sema.resolveType(block, ty_src, inst_data.operand); const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); - return block.addNoOp(var_decl_src, ptr_type, .alloc); + return block.addTy(.alloc, ptr_type); } fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -1365,7 +1363,7 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr try sema.validateVarType(block, ty_src, var_type); const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); - return block.addNoOp(var_decl_src, ptr_type, .alloc); + return block.addTy(.alloc, ptr_type); } fn zirAllocInferred( @@ -1388,12 +1386,9 @@ fn zirAllocInferred( // not needed in the case of constant values. However here, we plan to "downgrade" // to a normal instruction when we hit `resolve_inferred_alloc`. So we append // to the block even though it is currently a `.constant`. 
- const result = try sema.mod.constInst(sema.arena, src, .{ - .ty = inferred_alloc_ty, - .val = Value.initPayload(&val_payload.base), - }); + const result = try sema.addConstant(inferred_alloc_ty, Value.initPayload(&val_payload.base)); try sema.requireFunctionBlock(block, src); - try block.instructions.append(sema.gpa, result); + try block.instructions.append(sema.gpa, refToIndex(result).?); return result; } @@ -1630,18 +1625,21 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE const tracy = trace(@src()); defer tracy.end(); - const src: LazySrcLoc = .unneeded; + const src = sema.src; + const fn_inst_src = sema.src; + const inst_data = sema.code.instructions.items(.data)[inst].param_type; const fn_inst = sema.resolveInst(inst_data.callee); + const fn_inst_ty = sema.getTypeOf(fn_inst); const param_index = inst_data.param_index; - const fn_ty: Type = switch (fn_inst.ty.zigTypeTag()) { - .Fn => fn_inst.ty, + const fn_ty: Type = switch (fn_inst_ty.zigTypeTag()) { + .Fn => fn_inst_ty, .BoundFn => { - return sema.mod.fail(&block.base, fn_inst.src, "TODO implement zirParamType for method call syntax", .{}); + return sema.mod.fail(&block.base, fn_inst_src, "TODO implement zirParamType for method call syntax", .{}); }, else => { - return sema.mod.fail(&block.base, fn_inst.src, "expected function, found '{}'", .{fn_inst.ty}); + return sema.mod.fail(&block.base, fn_inst_src, "expected function, found '{}'", .{fn_inst_ty}); }, }; @@ -1711,23 +1709,20 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro const limbs = try arena.alloc(std.math.big.Limb, int.len); mem.copy(u8, mem.sliceAsBytes(limbs), limb_bytes); - return sema.mod.constInst(arena, .unneeded, .{ - .ty = Type.initTag(.comptime_int), - .val = try Value.Tag.int_big_positive.create(arena, limbs), - }); + return sema.addConstant( + Type.initTag(.comptime_int), + try Value.Tag.int_big_positive.create(arena, limbs), + ); } fn zirFloat(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; - const inst_data = sema.code.instructions.items(.data)[inst].float; - const src = inst_data.src(); - const number = inst_data.number; - - return sema.mod.constInst(arena, src, .{ - .ty = Type.initTag(.comptime_float), - .val = try Value.Tag.float_32.create(arena, number), - }); + const number = sema.code.instructions.items(.data)[inst].float; + return sema.addConstant( + Type.initTag(.comptime_float), + try Value.Tag.float_64.create(arena, number), + ); } fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -1735,13 +1730,11 @@ fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; - const src = inst_data.src(); const number = extra.get(); - - return sema.mod.constInst(arena, src, .{ - .ty = Type.initTag(.comptime_float), - .val = try Value.Tag.float_128.create(arena, number), - }); + return sema.addConstant( + Type.initTag(.comptime_float), + try Value.Tag.float_128.create(arena, number), + ); } fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { @@ -1785,10 +1778,7 @@ fn zirCompileLog( if (!gop.found_existing) { gop.value_ptr.* = src_node; } - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.void), - .val = Value.initTag(.void_value), - }); + return Air.Inst.Ref.void_value; } fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { @@ -1817,18 +1807,26 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Compil const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = 
sema.code.extra[extra.end..][0..extra.data.body_len]; + const gpa = sema.gpa; // AIR expects a block outside the loop block too. - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = undefined, - .src = src, - }, - .body = undefined, - }; - + // Reserve space for a Loop instruction so that generated Break instructions can + // point to it, even if it doesn't end up getting used because the code ends up being + // comptime evaluated. + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const loop_inst = block_inst + 1; + try sema.air_instructions.ensureUnusedCapacity(gpa, 2); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .block, + .data = undefined, + }); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .loop, + .data = .{ .ty_pl = .{ + .ty = .noreturn_type, + .payload = undefined, + } }, + }); var label: Scope.Block.Label = .{ .zir_block = inst, .merges = .{ @@ -1844,33 +1842,24 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Compil child_block.runtime_index += 1; const merges = &child_block.label.?.merges; - defer child_block.instructions.deinit(sema.gpa); - defer merges.results.deinit(sema.gpa); - defer merges.br_list.deinit(sema.gpa); - - // Reserve space for a Loop instruction so that generated Break instructions can - // point to it, even if it doesn't end up getting used because the code ends up being - // comptime evaluated. 
- const loop_inst = try sema.arena.create(Inst.Loop); - loop_inst.* = .{ - .base = .{ - .tag = Inst.Loop.base_tag, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .body = undefined, - }; + defer child_block.instructions.deinit(gpa); + defer merges.results.deinit(gpa); + defer merges.br_list.deinit(gpa); var loop_block = child_block.makeSubBlock(); - defer loop_block.instructions.deinit(sema.gpa); + defer loop_block.instructions.deinit(gpa); _ = try sema.analyzeBody(&loop_block, body); // Loop repetition is implied so the last instruction may or may not be a noreturn instruction. + try child_block.instructions.append(gpa, loop_inst); - try child_block.instructions.append(sema.gpa, &loop_inst.base); - loop_inst.body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, loop_block.instructions.items) }; - + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + loop_block.instructions.items.len); + sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity( + Air.Block{ .body_len = @intCast(u32, loop_block.instructions.items.len) }, + ); + sema.air_extra.appendAssumeCapacity(loop_block.instructions.items); return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } @@ -1890,27 +1879,28 @@ fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { +fn zirBlock( + sema: *Sema, + parent_block: *Scope.Block, + inst: Zir.Inst.Index, +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); - const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const src = inst_data.src(); - const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); + const pl_node = sema.code.instructions.items(.data)[inst].pl_node; + const src = 
pl_node.src(); + const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; + const gpa = sema.gpa; // Reserve space for a Block instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated. - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = undefined, // Set after analysis. - .src = src, - }, - .body = undefined, - }; + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .block, + .data = undefined, + }); var label: Scope.Block.Label = .{ .zir_block = inst, @@ -1932,9 +1922,9 @@ fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Compi }; const merges = &child_block.label.?.merges; - defer child_block.instructions.deinit(sema.gpa); - defer merges.results.deinit(sema.gpa); - defer merges.br_list.deinit(sema.gpa); + defer child_block.instructions.deinit(gpa); + defer merges.results.deinit(gpa); + defer merges.br_list.deinit(gpa); _ = try sema.analyzeBody(&child_block, body); @@ -1963,6 +1953,8 @@ fn analyzeBlockBody( const tracy = trace(@src()); defer tracy.end(); + const gpa = sema.gpa; + // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); assert(child_block.instructions.items[child_block.instructions.items.len - 1].ty.isNoReturn()); @@ -1971,7 +1963,7 @@ fn analyzeBlockBody( // No need for a block instruction. We can put the new instructions // directly into the parent block. 
const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items); - try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); + try parent_block.instructions.appendSlice(gpa, copied_instructions); return copied_instructions[copied_instructions.len - 1]; } if (merges.results.items.len == 1) { @@ -1982,7 +1974,7 @@ fn analyzeBlockBody( // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]); - try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); + try parent_block.instructions.appendSlice(gpa, copied_instructions); return merges.results.items[0]; } } @@ -1992,21 +1984,26 @@ fn analyzeBlockBody( // Need to set the type and emit the Block instruction. This allows machine code generation // to emit a jump instruction to after the block when it encounters the break. 
- try parent_block.instructions.append(sema.gpa, &merges.block_inst.base); + try parent_block.instructions.append(gpa, merges.block_inst); const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items); - merges.block_inst.base.ty = resolved_ty; - merges.block_inst.body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), - }; + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + child_block.instructions.items.len); + sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{ + .ty = try sema.addType(resolved_ty), + .payload = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(u32, child_block.instructions.items.len), + }), + } }; + sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. for (merges.br_list.items) |br| { - if (br.operand.ty.eql(resolved_ty)) { + if (sema.getTypeOf(br.operand).eql(resolved_ty)) { // No type coercion needed. continue; } var coerce_block = parent_block.makeSubBlock(); - defer coerce_block.instructions.deinit(sema.gpa); + defer coerce_block.instructions.deinit(gpa); const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br.operand, br.operand.src); // If no instructions were produced, such as in the case of a coercion of a // constant value to a new type, we can simply point the br operand to it. 
@@ -2032,7 +2029,7 @@ fn analyzeBlockBody( }, }; } - return &merges.block_inst.base; + return indexToRef(merges.block_inst); } fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { @@ -2104,7 +2101,7 @@ fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; try sema.requireRuntimeBlock(block, src); - _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint); + _ = try block.addNoOp(.breakpoint); } fn zirFence(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { @@ -2311,6 +2308,8 @@ fn analyzeCall( }), } + const gpa = sema.gpa; + const ret_type = func.ty.fnReturnType(); const is_comptime_call = block.is_comptime or modifier == .compile_time; @@ -2331,15 +2330,11 @@ fn analyzeCall( // set to in the `Scope.Block`. // This block instruction will be used to capture the return value from the // inlined function. - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = ret_type, - .src = call_src, - }, - .body = undefined, - }; + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .block, + .data = undefined, + }); // This one is shared among sub-blocks within the same callee, but not // shared among the entire inline/comptime call stack. 
var inlining: Scope.Block.Inlining = .{ @@ -2358,7 +2353,7 @@ fn analyzeCall( const parent_inst_map = sema.inst_map; sema.inst_map = .{}; defer { - sema.inst_map.deinit(sema.gpa); + sema.inst_map.deinit(gpa); sema.inst_map = parent_inst_map; } @@ -2390,9 +2385,9 @@ fn analyzeCall( const merges = &child_block.inlining.?.merges; - defer child_block.instructions.deinit(sema.gpa); - defer merges.results.deinit(sema.gpa); - defer merges.br_list.deinit(sema.gpa); + defer child_block.instructions.deinit(gpa); + defer merges.results.deinit(gpa); + defer merges.br_list.deinit(gpa); try sema.emitBackwardBranch(&child_block, call_src); @@ -2525,17 +2520,16 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const src = inst_data.src(); // Create an anonymous error set type with only this error value, and return the value. const kv = try sema.mod.getErrorValue(inst_data.get(sema.code)); const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key); - return sema.mod.constInst(sema.arena, src, .{ - .ty = result_type, - .val = try Value.Tag.@"error".create(sema.arena, .{ + return sema.addConstant( + result_type, + try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key, }), - }); + ); } fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -2558,10 +2552,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile .base = .{ .tag = .int_u64 }, .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, }; - return sema.mod.constInst(sema.arena, src, .{ - .ty = result_ty, - .val = Value.initPayload(&payload.base), - }); + return sema.addConstant(result_ty, Value.initPayload(&payload.base)); } try sema.requireRuntimeBlock(block, src); @@ -2587,10 +2578,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile .base = .{ 
.tag = .@"error" }, .data = .{ .name = sema.mod.error_name_list.items[@intCast(usize, int)] }, }; - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.anyerror), - .val = Value.initPayload(&payload.base), - }); + return sema.addConstant(Type.initTag(.anyerror), Value.initPayload(&payload.base)); } try sema.requireRuntimeBlock(block, src); if (block.wantSafety()) { @@ -2630,10 +2618,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com // Anything merged with anyerror is anyerror. if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.anyerror_type), - }); + return Air.Inst.Ref.anyerror_type; } // When we support inferred error sets, we'll want to use a data structure that can // represent a merged set of errors without forcing them to be resolved here. Until then @@ -2685,10 +2670,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com .names_len = @intCast(u32, new_names.len), }; const error_set_ty = try Type.Tag.error_set.create(sema.arena, new_error_set); - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.type), - .val = try Value.Tag.ty.create(sema.arena, error_set_ty), - }); + return sema.addConstant(Type.initTag(.type), try Value.Tag.ty.create(sema.arena, error_set_ty)); } fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -2697,12 +2679,11 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compil defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const src = inst_data.src(); const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.enum_literal), - .val = try Value.Tag.enum_literal.create(sema.arena, duped_name), - }); + return sema.addConstant( + 
Type.initTag(.enum_literal), + try Value.Tag.enum_literal.create(sema.arena, duped_name), + ); } fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -2712,11 +2693,12 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); + const operand_ty = sema.getTypeOf(operand); - const enum_tag: Air.Inst.Ref = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { .Enum => operand, .Union => { - //if (!operand.ty.unionHasTag()) { + //if (!operand_ty.unionHasTag()) { // return mod.fail( // &block.base, // operand_src, @@ -2728,58 +2710,44 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE }, else => { return mod.fail(&block.base, operand_src, "expected enum or tagged union, found {}", .{ - operand.ty, + operand_ty, }); }, }; + const enum_tag_ty = sema.getTypeOf(enum_tag); var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try enum_tag.ty.intTagType(&int_tag_type_buffer).copy(arena); + const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); - if (try sema.typeHasOnePossibleValue(block, src, enum_tag.ty)) |opv| { - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = opv, - }); + if (try sema.typeHasOnePossibleValue(block, src, enum_tag_ty)) |opv| { + return sema.addConstant(int_tag_ty, opv); } if (try sema.resolvePossiblyUndefinedValue(block, operand_src, enum_tag)) |enum_tag_val| { if (enum_tag_val.castTag(.enum_field_index)) |enum_field_payload| { const field_index = enum_field_payload.data; - switch (enum_tag.ty.tag()) { + switch (enum_tag_ty.tag()) { .enum_full => { - const enum_full = enum_tag.ty.castTag(.enum_full).?.data; + const enum_full = enum_tag_ty.castTag(.enum_full).?.data; 
if (enum_full.values.count() != 0) { const val = enum_full.values.keys()[field_index]; - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = val, - }); + return sema.addConstant(int_tag_ty, val); } else { // Field index and integer values are the same. const val = try Value.Tag.int_u64.create(arena, field_index); - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = val, - }); + return sema.addConstant(int_tag_ty, val); } }, .enum_simple => { // Field index and integer values are the same. const val = try Value.Tag.int_u64.create(arena, field_index); - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = val, - }); + return sema.addConstant(int_tag_ty, val); }, else => unreachable, } } else { // Assume it is already an integer and return it directly. - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = enum_tag_val, - }); + return sema.addConstant(int_tag_ty, enum_tag_val); } } @@ -2790,7 +2758,6 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); - const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -2805,10 +2772,7 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE if (try sema.resolvePossiblyUndefinedValue(block, operand_src, operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum()) { - return mod.constInst(arena, src, .{ - .ty = dest_ty, - .val = int_val, - }); + return sema.addConstant(dest_ty, int_val); } if (int_val.isUndef()) { return sema.failWithUseOfUndef(block, operand_src); @@ -2832,10 +2796,7 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE }; return mod.failWithOwnedErrorMsg(&block.base, msg); } - 
return mod.constInst(arena, src, .{ - .ty = dest_ty, - .val = int_val, - }); + return sema.addConstant(dest_ty, int_val); } try sema.requireRuntimeBlock(block, src); @@ -2854,16 +2815,17 @@ fn zirOptionalPayloadPtr( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const optional_ptr = sema.resolveInst(inst_data.operand); - assert(optional_ptr.ty.zigTypeTag() == .Pointer); + const optional_ptr_ty = sema.getTypeOf(optional_ptr); + assert(optional_ptr_ty.zigTypeTag() == .Pointer); const src = inst_data.src(); - const opt_type = optional_ptr.ty.elemType(); + const opt_type = optional_ptr_ty.elemType(); if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); + const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr_ty.isConstPtr(), .One); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2871,10 +2833,7 @@ fn zirOptionalPayloadPtr( return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. 
- return sema.mod.constInst(sema.arena, src, .{ - .ty = child_pointer, - .val = pointer_val, - }); + return sema.addConstant(child_pointer, pointer_val); } try sema.requireRuntimeBlock(block, src); @@ -2898,7 +2857,8 @@ fn zirOptionalPayload( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - const opt_type = operand.ty; + const operand_ty = sema.getTypeOf(operand); + const opt_type = operand_ty; if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); } @@ -2909,10 +2869,7 @@ fn zirOptionalPayload( if (val.isNull()) { return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); } - return sema.mod.constInst(sema.arena, src, .{ - .ty = child_type, - .val = val, - }); + return sema.addConstant(child_type, val); } try sema.requireRuntimeBlock(block, src); @@ -2936,25 +2893,27 @@ fn zirErrUnionPayload( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - if (operand.ty.zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); + const operand_src = src; + const operand_ty = sema.getTypeOf(operand); + if (operand_ty.zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, operand_src, "expected error union type, found '{}'", .{operand_ty}); if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.getError()) |name| { return sema.mod.fail(&block.base, src, "caught unexpected error '{s}'", .{name}); } const data = val.castTag(.error_union).?.data; - return sema.mod.constInst(sema.arena, src, .{ - .ty = operand.ty.castTag(.error_union).?.data.payload, - .val = data, - }); + return sema.addConstant( + operand_ty.castTag(.error_union).?.data.payload, + data, + ); } try 
sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { const is_non_err = try block.addUnOp(.is_err, operand); try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); } - const result_ty = operand.ty.castTag(.error_union).?.data.payload; + const result_ty = operand_ty.castTag(.error_union).?.data.payload; return block.addTyOp(.unwrap_errunion_payload, result_ty, operand); } @@ -2971,12 +2930,13 @@ fn zirErrUnionPayloadPtr( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - assert(operand.ty.zigTypeTag() == .Pointer); + const operand_ty = sema.getTypeOf(operand); + assert(operand_ty.zigTypeTag() == .Pointer); - if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); + if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand_ty.elemType()}); - const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); + const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand_ty.elemType().castTag(.error_union).?.data.payload, !operand_ty.isConstPtr(), .One); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2985,13 +2945,13 @@ fn zirErrUnionPayloadPtr( } const data = val.castTag(.error_union).?.data; // The same Value represents the pointer to the error union and the payload. 
- return sema.mod.constInst(sema.arena, src, .{ - .ty = operand_pointer_ty, - .val = try Value.Tag.ref_val.create( + return sema.addConstant( + operand_pointer_ty, + try Value.Tag.ref_val.create( sema.arena, data, ), - }); + ); } try sema.requireRuntimeBlock(block, src); @@ -3010,18 +2970,16 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compi const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - if (operand.ty.zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); + const operand_ty = sema.getTypeOf(operand); + if (operand_ty.zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand_ty}); - const result_ty = operand.ty.castTag(.error_union).?.data.error_set; + const result_ty = operand_ty.castTag(.error_union).?.data.error_set; if (try sema.resolveDefinedValue(block, src, operand)) |val| { assert(val.getError() != null); const data = val.castTag(.error_union).?.data; - return sema.mod.constInst(sema.arena, src, .{ - .ty = result_ty, - .val = data, - }); + return sema.addConstant(result_ty, data); } try sema.requireRuntimeBlock(block, src); @@ -3036,21 +2994,19 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - assert(operand.ty.zigTypeTag() == .Pointer); + const operand_ty = sema.getTypeOf(operand); + assert(operand_ty.zigTypeTag() == .Pointer); - if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); + if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, src, "expected error 
union type, found {}", .{operand_ty.elemType()}); - const result_ty = operand.ty.elemType().castTag(.error_union).?.data.error_set; + const result_ty = operand_ty.elemType().castTag(.error_union).?.data.error_set; if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); assert(val.getError() != null); const data = val.castTag(.error_union).?.data; - return sema.mod.constInst(sema.arena, src, .{ - .ty = result_ty, - .val = data, - }); + return sema.addConstant(result_ty, data); } try sema.requireRuntimeBlock(block, src); @@ -3064,9 +3020,10 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - if (operand.ty.zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); - if (operand.ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { + const operand_ty = sema.getTypeOf(operand); + if (operand_ty.zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand_ty}); + if (operand_ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { return sema.mod.fail(&block.base, src, "expression value is ignored", .{}); } } @@ -3233,10 +3190,10 @@ fn funcCommon( } if (is_extern) { - return sema.mod.constInst(sema.arena, src, .{ - .ty = fn_ty, - .val = try Value.Tag.extern_fn.create(sema.arena, sema.owner_decl), - }); + return sema.addConstant( + fn_ty, + try Value.Tag.extern_fn.create(sema.arena, sema.owner_decl), + ); } if (body_inst == 0) { @@ -3261,11 +3218,7 @@ fn funcCommon( .base = .{ .tag = .function }, .data = new_func, }; - const result = try sema.mod.constInst(sema.arena, src, .{ - .ty = fn_ty, - .val = Value.initPayload(&fn_payload.base), - }); - return result; + return 
sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base)); } fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3324,7 +3277,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); const object = sema.resolveInst(extra.lhs); - const object_ptr = if (object.ty.zigTypeTag() == .Pointer) + const object_ptr = if (sema.getTypeOf(object).zigTypeTag() == .Pointer) object else try sema.analyzeRef(block, src, object); @@ -3397,13 +3350,14 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr ), }; - switch (operand.ty.zigTypeTag()) { + const operand_ty = sema.getTypeOf(operand); + switch (operand_ty.zigTypeTag()) { .ComptimeInt, .Int => {}, else => return sema.mod.fail( &block.base, operand_src, "expected integer type, found '{}'", - .{operand.ty}, + .{operand_ty}, ), } @@ -3454,13 +3408,14 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE ), }; - switch (operand.ty.zigTypeTag()) { + const operand_ty = sema.getTypeOf(operand); + switch (operand_ty.zigTypeTag()) { .ComptimeFloat, .Float, .ComptimeInt => {}, else => return sema.mod.fail( &block.base, operand_src, "expected float type, found '{}'", - .{operand.ty}, + .{operand_ty}, ), } @@ -3479,7 +3434,8 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr const bin_inst = sema.code.instructions.items(.data)[inst].bin; const array = sema.resolveInst(bin_inst.lhs); - const array_ptr = if (array.ty.zigTypeTag() == .Pointer) + const array_ty = sema.getTypeOf(array); + const array_ptr = if (array_ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, sema.src, array); @@ -3497,7 +3453,8 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compil const elem_index_src: 
LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array = sema.resolveInst(extra.lhs); - const array_ptr = if (array.ty.zigTypeTag() == .Pointer) + const array_ty = sema.getTypeOf(array); + const array_ptr = if (array_ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, src, array); @@ -3705,9 +3662,10 @@ fn analyzeSwitch( const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset }; const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset }; + const operand_ty = sema.getTypeOf(operand); // Validate usage of '_' prongs. - if (special_prong == .under and !operand.ty.isNonexhaustiveEnum()) { + if (special_prong == .under and !operand_ty.isNonexhaustiveEnum()) { const msg = msg: { const msg = try mod.errMsg( &block.base, @@ -3729,9 +3687,9 @@ fn analyzeSwitch( } // Validate for duplicate items, missing else prong, and invalid range. 
- switch (operand.ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag()) { .Enum => { - var seen_fields = try gpa.alloc(?Module.SwitchProngSrc, operand.ty.enumFieldCount()); + var seen_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); defer gpa.free(seen_fields); mem.set(?Module.SwitchProngSrc, seen_fields, null); @@ -3777,7 +3735,7 @@ fn analyzeSwitch( ); } - try sema.validateSwitchNoRange(block, ranges_len, operand.ty, src_node_offset); + try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } const all_tags_handled = for (seen_fields) |seen_src| { @@ -3798,7 +3756,7 @@ fn analyzeSwitch( for (seen_fields) |seen_src, i| { if (seen_src != null) continue; - const field_name = operand.ty.enumFieldName(i); + const field_name = operand_ty.enumFieldName(i); // TODO have this point to the tag decl instead of here try mod.errNote( @@ -3810,10 +3768,10 @@ fn analyzeSwitch( ); } try mod.errNoteNonLazy( - operand.ty.declSrcLoc(), + operand_ty.declSrcLoc(), msg, "enum '{}' declared here", - .{operand.ty}, + .{operand_ty}, ); break :msg msg; }; @@ -3908,12 +3866,12 @@ fn analyzeSwitch( } check_range: { - if (operand.ty.zigTypeTag() == .Int) { + if (operand_ty.zigTypeTag() == .Int) { var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); - const min_int = try operand.ty.minInt(&arena, mod.getTarget()); - const max_int = try operand.ty.maxInt(&arena, mod.getTarget()); + const min_int = try operand_ty.minInt(&arena, mod.getTarget()); + const max_int = try operand_ty.maxInt(&arena, mod.getTarget()); if (try range_set.spans(min_int, max_int)) { if (special_prong == .@"else") { return mod.fail( @@ -3983,7 +3941,7 @@ fn analyzeSwitch( ); } - try sema.validateSwitchNoRange(block, ranges_len, operand.ty, src_node_offset); + try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } switch (special_prong) { @@ -4015,7 +3973,7 @@ fn analyzeSwitch( &block.base, src, "else prong required when 
switching on type '{}'", - .{operand.ty}, + .{operand_ty}, ); } @@ -4063,7 +4021,7 @@ fn analyzeSwitch( ); } - try sema.validateSwitchNoRange(block, ranges_len, operand.ty, src_node_offset); + try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } }, @@ -4083,20 +4041,15 @@ fn analyzeSwitch( .ComptimeFloat, .Float, => return mod.fail(&block.base, operand_src, "invalid switch operand type '{}'", .{ - operand.ty, + operand_ty, }), } - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = undefined, // Set after analysis. - .src = src, - }, - .body = undefined, - }; - + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .block, + .data = undefined, + }); var label: Scope.Block.Label = .{ .zir_block = switch_inst, .merges = .{ @@ -4634,7 +4587,7 @@ fn zirBitwise( const lhs_ty = sema.getTypeOf(lhs); const rhs_ty = sema.getTypeOf(rhs); - const instructions = &[_]Air.Inst.Index{ lhs, rhs }; + const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -4763,18 +4716,8 @@ fn analyzeArithmetic( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const instructions = &[_]Air.Inst.Index{ lhs, rhs }; - const resolved_type = try sema.resolvePeerTypes(block, src, instructions); - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); - const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - - const scalar_type = if (resolved_type.zigTypeTag() == .Vector) - resolved_type.elemType() - else - resolved_type; - - const scalar_tag = scalar_type.zigTypeTag(); - + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); if 
(lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ @@ -4790,6 +4733,18 @@ fn analyzeArithmetic( }); } + const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; + const resolved_type = try sema.resolvePeerTypes(block, src, instructions); + const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); + const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); + + const scalar_type = if (resolved_type.zigTypeTag() == .Vector) + resolved_type.elemType() + else + resolved_type; + + const scalar_tag = scalar_type.zigTypeTag(); + const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat; @@ -4807,10 +4762,7 @@ fn analyzeArithmetic( if (rhs_val.compareWithZero(.eq)) { switch (zir_tag) { .add, .addwrap, .sub, .subwrap => { - return sema.mod.constInst(sema.arena, src, .{ - .ty = scalar_type, - .val = lhs_val, - }); + return sema.addConstant(scalar_type, lhs_val); }, else => {}, } @@ -4850,10 +4802,7 @@ fn analyzeArithmetic( log.debug("{s}({}, {}) result: {}", .{ @tagName(zir_tag), lhs_val, rhs_val, value }); - return sema.mod.constInst(sema.arena, src, .{ - .ty = scalar_type, - .val = value, - }); + return sema.addConstant(scalar_type, value); } } @@ -5167,16 +5116,16 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr // args: []const FnArg, field_values[5] = Value.initTag(.null_value); // TODO - return sema.mod.constInst(sema.arena, src, .{ - .ty = type_info_ty, - .val = try Value.Tag.@"union".create(sema.arena, .{ + return sema.addConstant( + type_info_ty, + try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create( sema.arena, @enumToInt(@typeInfo(std.builtin.TypeInfo).Union.tag_type.?.Fn), ), .val = try Value.Tag.@"struct".create(sema.arena, field_values.ptr), }), - 
}); + ); }, else => |t| return sema.mod.fail(&block.base, src, "TODO: implement zirTypeInfo for {s}", .{ @tagName(t), @@ -5189,7 +5138,8 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; const operand = sema.resolveInst(inst_data.operand); - return sema.addType(operand.ty); + const operand_ty = sema.getTypeOf(operand); + return sema.addType(operand_ty); } fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5241,11 +5191,12 @@ fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); + const operand_src = src; // TODO put this on the operand, not the `!` const uncasted_operand = sema.resolveInst(inst_data.operand); const bool_type = Type.initTag(.bool); - const operand = try sema.coerce(block, bool_type, uncasted_operand, uncasted_operand.src); - if (try sema.resolveDefinedValue(block, src, operand)) |val| { + const operand = try sema.coerce(block, bool_type, uncasted_operand, operand_src); + if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { if (val.toBool()) { return Air.Inst.Ref.bool_false; } else { @@ -5267,12 +5218,13 @@ fn zirBoolBr( const datas = sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; - const src: LazySrcLoc = .unneeded; const lhs = sema.resolveInst(inst_data.lhs); + const lhs_src = sema.src; const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; + const gpa = sema.gpa; - if (try sema.resolveDefinedValue(parent_block, src, lhs)) |lhs_val| { + if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { if (lhs_val.toBool() == is_bool_or) { if (is_bool_or) { return Air.Inst.Ref.bool_true; @@ -5286,49 
+5238,59 @@ fn zirBoolBr( return sema.resolveBody(parent_block, body); } - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = Type.initTag(.bool), - .src = src, - }, - .body = undefined, - }; + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .block, + .data = .{ .ty_pl = .{ + .ty = .bool_type, + .payload = undefined, + } }, + }); var child_block = parent_block.makeSubBlock(); child_block.runtime_loop = null; - child_block.runtime_cond = lhs.src; + child_block.runtime_cond = lhs_src; child_block.runtime_index += 1; - defer child_block.instructions.deinit(sema.gpa); + defer child_block.instructions.deinit(gpa); var then_block = child_block.makeSubBlock(); - defer then_block.instructions.deinit(sema.gpa); + defer then_block.instructions.deinit(gpa); var else_block = child_block.makeSubBlock(); - defer else_block.instructions.deinit(sema.gpa); + defer else_block.instructions.deinit(gpa); const lhs_block = if (is_bool_or) &then_block else &else_block; const rhs_block = if (is_bool_or) &else_block else &then_block; - const lhs_result = try sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.bool), - .val = if (is_bool_or) Value.initTag(.bool_true) else Value.initTag(.bool_false), - }); - _ = try lhs_block.addBr(src, block_inst, lhs_result); + const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false; + _ = try lhs_block.addBr(block_inst, lhs_result); const rhs_result = try sema.resolveBody(rhs_block, body); - _ = try rhs_block.addBr(src, block_inst, rhs_result); + _ = try rhs_block.addBr(block_inst, rhs_result); - const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, then_block.instructions.items) }; - const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, else_block.instructions.items) }; - _ = try child_block.addCondBr(src, lhs, 
air_then_body, air_else_body); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + + then_block.instructions.items.len + else_block.instructions.items.len + + @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len); - block_inst.body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), - }; - try parent_block.instructions.append(sema.gpa, &block_inst.base); - return &block_inst.base; + sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( + Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, + ); + sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); + + const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = @intCast(u32, then_block.instructions.items.len), + .else_body_len = @intCast(u32, else_block.instructions.items.len), + }); + sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); + sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items); + + _ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{ + .operand = lhs, + .payload = cond_br_payload, + } } }); + + try parent_block.instructions.append(gpa, block_inst); + return indexToRef(block_inst); } fn zirIsNonNull( @@ -5439,7 +5401,7 @@ fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compil if (safety_check and block.wantSafety()) { return sema.safetyPanic(block, src, .unreach); } else { - _ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach); + _ = try block.addNoOp(.unreach); return always_noreturn; } } @@ -5461,10 +5423,10 @@ fn zirRetErrValue( } // Return the error code from the function. 
const kv = try sema.mod.getErrorValue(err_name); - const result_inst = try sema.mod.constInst(sema.arena, src, .{ - .ty = try Type.Tag.error_set_single.create(sema.arena, kv.key), - .val = try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - }); + const result_inst = try sema.addConstant( + try Type.Tag.error_set_single.create(sema.arena, kv.key), + try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), + ); return sema.analyzeRet(block, result_inst, src, true); } @@ -5505,7 +5467,7 @@ fn analyzeRet( if (block.inlining) |inlining| { // We are inlining a function call; rewrite the `ret` as a `break`. try inlining.merges.results.append(sema.gpa, operand); - _ = try block.addBr(src, inlining.merges.block_inst, operand); + _ = try block.addBr(inlining.merges.block_inst, operand); return always_noreturn; } @@ -5613,10 +5575,7 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co const src = inst_data.src(); const struct_type = try sema.resolveType(block, src, inst_data.operand); - return sema.mod.constInst(sema.arena, src, .{ - .ty = struct_type, - .val = Value.initTag(.empty_struct_value), - }); + return sema.addConstant(struct_type, Value.initTag(.empty_struct_value)); } fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5696,10 +5655,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: root_msg = try mod.errMsg(&block.base, src, template, args); } } else { - field_inits[i] = try mod.constInst(sema.arena, src, .{ - .ty = field.ty, - .val = field.default_val, - }); + field_inits[i] = try sema.addConstant(field.ty, field.default_val); } } if (root_msg) |msg| { @@ -5729,10 +5685,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: for (field_inits) |field_init, i| { values[i] = field_init.value().?; } - return mod.constInst(sema.arena, src, .{ - .ty = struct_ty, - .val = try 
Value.Tag.@"struct".create(sema.arena, values.ptr), - }); + return sema.addConstant(struct_ty, try Value.Tag.@"struct".create(sema.arena, values.ptr)); } return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); @@ -5913,20 +5866,13 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr .base = .{ .tag = .int_u64 }, .data = addr, }; - return sema.mod.constInst(sema.arena, src, .{ - .ty = type_res, - .val = Value.initPayload(&val_payload.base), - }); + return sema.addConstant(type_res, Value.initPayload(&val_payload.base)); } try sema.requireRuntimeBlock(block, src); if (block.wantSafety()) { - const zero = try sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.u64), - .val = Value.initTag(.zero), - }); if (!type_res.isAllowzeroPtr()) { - const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, zero); + const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); try sema.addSafetyCheck(block, is_non_zero, .cast_to_null); } @@ -5936,12 +5882,12 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr .base = .{ .tag = .int_u64 }, .data = ptr_align - 1, }; - const align_minus_1 = try sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.u64), - .val = Value.initPayload(&val_payload.base), - }); + const align_minus_1 = try sema.addConstant( + Type.initTag(.usize), + Value.initPayload(&val_payload.base), + ); const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); - const is_aligned = try block.addBinOp(.cmp_eq, remainder, zero); + const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment); } } @@ -6217,10 +6163,10 @@ fn zirVarExtended( .is_mutable = true, // TODO get rid of this unused field .is_threadlocal = small.is_threadlocal, }; - const result = try sema.mod.constInst(sema.arena, src, .{ - .ty = var_ty, - .val = try 
Value.Tag.variable.create(sema.arena, new_var), - }); + const result = try sema.addConstant( + var_ty, + try Value.Tag.variable.create(sema.arena, new_var), + ); return result; } @@ -6380,32 +6326,13 @@ pub const PanicId = enum { invalid_error_code, }; -fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Ref, panic_id: PanicId) !void { - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = Type.initTag(.void), - .src = ok.src, - }, - .body = .{ - .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the condbr. - }, - }; - - const ok_body: ir.Body = .{ - .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the br_void. - }; - const br_void = try sema.arena.create(Inst.BrVoid); - br_void.* = .{ - .base = .{ - .tag = .br_void, - .ty = Type.initTag(.noreturn), - .src = ok.src, - }, - .block = block_inst, - }; - ok_body.instructions[0] = &br_void.base; +fn addSafetyCheck( + sema: *Sema, + parent_block: *Scope.Block, + ok: Air.Inst.Ref, + panic_id: PanicId, +) !void { + const gpa = sema.gpa; var fail_block: Scope.Block = .{ .parent = parent_block, @@ -6416,26 +6343,55 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Ref, pan .is_comptime = parent_block.is_comptime, }; - defer fail_block.instructions.deinit(sema.gpa); + defer fail_block.instructions.deinit(gpa); - _ = try sema.safetyPanic(&fail_block, ok.src, panic_id); + _ = try sema.safetyPanic(&fail_block, .unneeded, panic_id); - const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, fail_block.instructions.items) }; + try parent_block.instructions.ensureUnusedCapacity(gpa, 1); - const condbr = try sema.arena.create(Inst.CondBr); - condbr.* = .{ - .base = .{ - .tag = .condbr, - .ty = Type.initTag(.noreturn), - .src = ok.src, - }, - .condition = ok, - .then_body = ok_body, - .else_body = fail_body, - }; - 
block_inst.body.instructions[0] = &condbr.base; + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + 1 + // The main block only needs space for the cond_br. + @typeInfo(Air.CondBr).Struct.fields.len + + 1 + // The ok branch of the cond_br only needs space for the br. + fail_block.instructions.items.len); - try parent_block.instructions.append(sema.gpa, &block_inst.base); + try sema.air_instructions.ensureUnusedCapacity(gpa, 3); + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const cond_br_inst = block_inst + 1; + const br_inst = cond_br_inst + 1; + sema.air_instructions.appendAssumeCapacity(gpa, .{ + .tag = .block, + .data = .{ .ty_pl = .{ + .ty = .void_type, + .payload = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = 1, + }), + } }, + }); + sema.air_extra.appendAssumeCapacity(cond_br_inst); + + sema.air_instructions.appendAssumeCapacity(gpa, .{ + .tag = .cond_br, + .data = .{ .pl_op = .{ + .operand = ok, + .payload = sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = 1, + .else_body_len = @intCast(u32, fail_block.instructions.items.len), + }), + } }, + }); + sema.air_extra.appendAssumeCapacity(br_inst); + sema.air_extra.appendSliceAssumeCapacity(fail_block.instructions.items); + + sema.air_instructions.appendAssumeCapacity(gpa, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = block_inst, + .operand = .void_value, + } }, + }); + + parent_block.instructions.appendAssumeCapacity(block_inst); } fn panicWithMsg( @@ -6451,18 +6407,18 @@ fn panicWithMsg( mod.comp.bin_file.options.object_format == .c; if (!this_feature_is_implemented_in_the_backend) { // TODO implement this feature in all the backends and then delete this branch - _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint); - _ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach); + _ = try block.addNoOp(.breakpoint); + _ = try block.addNoOp(.unreach); return always_noreturn; } const panic_fn = try 
sema.getBuiltin(block, src, "panic"); const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One); - const null_stack_trace = try mod.constInst(arena, src, .{ - .ty = try mod.optionalType(arena, ptr_stack_trace_ty), - .val = Value.initTag(.null_value), - }); + const null_stack_trace = try sema.addConstant( + try mod.optionalType(arena, ptr_stack_trace_ty), + Value.initTag(.null_value), + ); const args = try arena.create([2]Air.Inst.Index); args.* = .{ msg_inst, null_stack_trace }; _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args); @@ -6503,7 +6459,6 @@ fn safetyPanic( }; const casted_msg_inst = try sema.coerce(block, Type.initTag(.const_slice_u8), msg_inst, src); - return sema.panicWithMsg(block, src, casted_msg_inst); } @@ -6533,13 +6488,13 @@ fn namedFieldPtr( switch (elem_ty.zigTypeTag()) { .Array => { if (mem.eql(u8, field_name, "len")) { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.single_const_pointer_to_comptime_int), - .val = try Value.Tag.ref_val.create( + return sema.addConstant( + Type.initTag(.single_const_pointer_to_comptime_int), + try Value.Tag.ref_val.create( arena, try Value.Tag.int_u64.create(arena, elem_ty.arrayLen()), ), - }); + ); } else { return mod.fail( &block.base, @@ -6554,13 +6509,13 @@ fn namedFieldPtr( switch (ptr_child.zigTypeTag()) { .Array => { if (mem.eql(u8, field_name, "len")) { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.single_const_pointer_to_comptime_int), - .val = try Value.Tag.ref_val.create( + return sema.addConstant( + Type.initTag(.single_const_pointer_to_comptime_int), + try Value.Tag.ref_val.create( arena, try Value.Tag.int_u64.create(arena, ptr_child.arrayLen()), ), - }); + ); } else { return mod.fail( &block.base, @@ -6597,15 +6552,15 @@ fn namedFieldPtr( }); } else 
(try mod.getErrorValue(field_name)).key; - return mod.constInst(arena, src, .{ - .ty = try Module.simplePtrType(arena, child_type, false, .One), - .val = try Value.Tag.ref_val.create( + return sema.addConstant( + try Module.simplePtrType(arena, child_type, false, .One), + try Value.Tag.ref_val.create( arena, try Value.Tag.@"error".create(arena, .{ .name = name, }), ), - }); + ); }, .Struct, .Opaque, .Union => { if (child_type.getNamespace()) |namespace| { @@ -6651,10 +6606,10 @@ fn namedFieldPtr( }; const field_index_u32 = @intCast(u32, field_index); const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32); - return mod.constInst(arena, src, .{ - .ty = try Module.simplePtrType(arena, child_type, false, .One), - .val = try Value.Tag.ref_val.create(arena, enum_val), - }); + return sema.addConstant( + try Module.simplePtrType(arena, child_type, false, .One), + try Value.Tag.ref_val.create(arena, enum_val), + ); }, else => return mod.fail(&block.base, src, "type '{}' has no members", .{child_type}), } @@ -6701,7 +6656,6 @@ fn analyzeStructFieldPtr( field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6714,13 +6668,13 @@ fn analyzeStructFieldPtr( const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - return mod.constInst(arena, src, .{ - .ty = ptr_field_ty, - .val = try Value.Tag.field_ptr.create(arena, .{ + return sema.addConstant( + ptr_field_ty, + try Value.Tag.field_ptr.create(arena, .{ .container_ptr = struct_ptr_val, .field_index = field_index, }), - }); + ); } try sema.requireRuntimeBlock(block, src); @@ -6751,13 +6705,13 @@ fn analyzeUnionFieldPtr( if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| { // TODO detect inactive union field and emit compile error - return 
mod.constInst(arena, src, .{ - .ty = ptr_field_ty, - .val = try Value.Tag.field_ptr.create(arena, .{ + return sema.addConstant( + ptr_field_ty, + try Value.Tag.field_ptr.create(arena, .{ .container_ptr = union_ptr_val, .field_index = field_index, }), - }); + ); } try sema.requireRuntimeBlock(block, src); @@ -6808,10 +6762,10 @@ fn elemPtrArray( const elem_ptr = try array_ptr_val.elemPtr(sema.arena, @intCast(usize, index_u64)); const pointee_type = array_ptr.ty.elemType().elemType(); - return sema.mod.constInst(sema.arena, src, .{ - .ty = try Type.Tag.single_const_pointer.create(sema.arena, pointee_type), - .val = elem_ptr, - }); + return sema.addConstant( + try Type.Tag.single_const_pointer.create(sema.arena, pointee_type), + elem_ptr, + ); } } _ = elem_index; @@ -6870,7 +6824,7 @@ fn coerce( .Optional => { // null to ?T if (inst_ty.zigTypeTag() == .Null) { - return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = Value.initTag(.null_value) }); + return sema.addConstant(dest_type, Value.initTag(.null_value)); } // T to ?T @@ -6981,10 +6935,10 @@ fn coerce( }; return mod.failWithOwnedErrorMsg(&block.base, msg); }; - return mod.constInst(arena, inst_src, .{ - .ty = resolved_dest_type, - .val = try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)), - }); + return sema.addConstant( + resolved_dest_type, + try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)), + ); } }, else => {}, @@ -7024,7 +6978,7 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.R if (!val.intFitsInType(dest_type, target)) { return sema.mod.fail(&block.base, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val }); } - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) { if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { @@ -7037,7 +6991,7 @@ fn 
coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.R ), error.OutOfMemory => return error.OutOfMemory, }; - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = res }); + return sema.addConstant(dest_type, res); } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { return sema.mod.fail(&block.base, inst.src, "TODO int to float", .{}); } @@ -7132,7 +7086,7 @@ fn bitcast( fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } @@ -7140,7 +7094,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
- return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } @@ -7200,13 +7154,11 @@ fn analyzeRef( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); + const operand_ty = sema.getTypeOf(operand); + const ptr_type = try sema.mod.simplePtrType(sema.arena, operand_ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { - return sema.mod.constInst(sema.arena, src, .{ - .ty = ptr_type, - .val = try Value.Tag.ref_val.create(sema.arena, val), - }); + return sema.addConstant(ptr_type, try Value.Tag.ref_val.create(sema.arena, val)); } try sema.requireRuntimeBlock(block, src); @@ -7267,7 +7219,8 @@ fn analyzeIsNonErr( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const ot = operand.ty.zigTypeTag(); + const operand_ty = sema.getTypeOf(operand); + const ot = operand_ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); @@ -7549,7 +7502,7 @@ fn cmpNumeric( fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index { if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { - return sema.mod.constInst(sema.arena, inst_src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } try sema.requireRuntimeBlock(block, inst.src); @@ -7614,11 +7567,8 @@ fn wrapErrorUnion( else => unreachable, } - return sema.mod.constInst(sema.arena, inst.src, .{ - .ty = dest_type, - // creating a SubValue for the error_union payload - .val = try Value.Tag.error_union.create(sema.arena, val), - }); + // Create a SubValue for the error_union payload. 
+ return sema.addConstant(dest_type, try Value.Tag.error_union.create(sema.arena, val)); } try sema.requireRuntimeBlock(block, inst.src); diff --git a/src/Zir.zig b/src/Zir.zig index e14b636ab6..42924817fc 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -386,7 +386,7 @@ pub const Inst = struct { int, /// Arbitrary sized integer literal. Uses the `str` union field. int_big, - /// A float literal that fits in a f32. Uses the float union value. + /// A float literal that fits in a f64. Uses the float union value. float, /// A float literal that fits in a f128. Uses the `pl_node` union value. /// Payload is `Float128`. @@ -2058,16 +2058,7 @@ pub const Inst = struct { /// Offset from Decl AST node index. node: i32, int: u64, - float: struct { - /// Offset from Decl AST node index. - /// `Tag` determines which kind of AST node this points to. - src_node: i32, - number: f32, - - pub fn src(self: @This()) LazySrcLoc { - return .{ .node_offset = self.src_node }; - } - }, + float: f64, array_type_sentinel: struct { len: Ref, /// index into extra, points to an `ArrayTypeSentinel` @@ -3256,10 +3247,8 @@ const Writer = struct { } fn writeFloat(self: *Writer, stream: anytype, inst: Inst.Index) !void { - const inst_data = self.code.instructions.items(.data)[inst].float; - const src = inst_data.src(); - try stream.print("{d}) ", .{inst_data.number}); - try self.writeSrc(stream, src); + const number = self.code.instructions.items(.data)[inst].float; + try stream.print("{d})", .{number}); } fn writeFloat128(self: *Writer, stream: anytype, inst: Inst.Index) !void { -- cgit v1.2.3 From c020a302960c499ffe811dd0601a2d386c191b91 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Jul 2021 21:57:40 -0700 Subject: Sema: remove br_block_flat AIR instruction Thanks to the new AIR memory layout, we can do this by turning a br operand into a block, rather than having this special purpose instruction. 
--- BRANCH_TODO | 42 -------------- src/Air.zig | 4 -- src/Liveness.zig | 2 - src/Module.zig | 2 +- src/Sema.zig | 170 ++++++++++++++++++++++++++++++++++--------------------- 5 files changed, 105 insertions(+), 115 deletions(-) (limited to 'src/Module.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index aaba8b70b3..9055cda307 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -16,48 +16,6 @@ return inst.val; } - pub fn breakBlock(base: *Inst) ?*Block { - return switch (base.tag) { - .br => base.castTag(.br).?.block, - .br_void => base.castTag(.br_void).?.block, - .br_block_flat => base.castTag(.br_block_flat).?.block, - else => null, - }; - } - - pub const convertable_br_size = std.math.max(@sizeOf(BrBlockFlat), @sizeOf(Br)); - pub const convertable_br_align = std.math.max(@alignOf(BrBlockFlat), @alignOf(Br)); - comptime { - assert(@offsetOf(BrBlockFlat, "base") == @offsetOf(Br, "base")); - } - - pub const BrBlockFlat = struct { - pub const base_tag = Tag.br_block_flat; - - base: Inst, - block: *Block, - body: Body, - - pub fn operandCount(self: *const BrBlockFlat) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const BrBlockFlat, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; - - /// Same as `br` except the operand is a list of instructions to be treated as - /// a flat block; that is there is only 1 break instruction from the block, and - /// it is implied to be after the last instruction, and the last instruction is - /// the break operand. - /// This instruction exists for late-stage semantic analysis patch ups, to - /// replace one br operand with multiple instructions, without moving anything else around. - br_block_flat, - - /// For debugging purposes, prints a function representation to stderr. 
diff --git a/src/Air.zig b/src/Air.zig index e2eeae1130..60e6e9933d 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -308,10 +308,6 @@ pub const Inst = struct { operand: Ref, payload: u32, }, - constant: struct { - ty: Type, - val: Value, - }, dbg_stmt: struct { line: u32, column: u32, diff --git a/src/Liveness.zig b/src/Liveness.zig index 838f19d4a1..98af9eb429 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -299,8 +299,6 @@ fn analyzeInst( const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; try analyzeWithContext(a, new_set, body); - // We let this continue so that it can possibly mark the block as - // unreferenced below. return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }); }, .loop => { diff --git a/src/Module.zig b/src/Module.zig index 4bd48dad05..94d8b63744 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1185,7 +1185,7 @@ pub const Scope = struct { block_inst: Air.Inst.Index, /// Separate array list from break_inst_list so that it can be passed directly /// to resolvePeerTypes. - results: ArrayListUnmanaged(Air.Inst.Index), + results: ArrayListUnmanaged(Air.Inst.Ref), /// Keeps track of the break instructions so that the operand can be replaced /// if we need to add type coercion at the end of block analysis. /// Same indexes, capacity, length as `results`. 
diff --git a/src/Sema.zig b/src/Sema.zig index 48ad8d97fc..b4e8cd5af5 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -163,36 +163,36 @@ pub fn analyzeBody( const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .arg => try sema.zirArg(block, inst), - //.alloc => try sema.zirAlloc(block, inst), - //.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - //.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - //.alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), - //.alloc_mut => try sema.zirAllocMut(block, inst), - //.alloc_comptime => try sema.zirAllocComptime(block, inst), - //.anyframe_type => try sema.zirAnyframeType(block, inst), - //.array_cat => try sema.zirArrayCat(block, inst), - //.array_mul => try sema.zirArrayMul(block, inst), - //.array_type => try sema.zirArrayType(block, inst), - //.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), - //.vector_type => try sema.zirVectorType(block, inst), - //.as => try sema.zirAs(block, inst), - //.as_node => try sema.zirAsNode(block, inst), - //.bit_and => try sema.zirBitwise(block, inst, .bit_and), - //.bit_not => try sema.zirBitNot(block, inst), - //.bit_or => try sema.zirBitwise(block, inst, .bit_or), - //.bitcast => try sema.zirBitcast(block, inst), - //.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), - //.block => try sema.zirBlock(block, inst), - //.suspend_block => try sema.zirSuspendBlock(block, inst), - //.bool_not => try sema.zirBoolNot(block, inst), - //.bool_br_and => try sema.zirBoolBr(block, inst, false), - //.bool_br_or => try sema.zirBoolBr(block, inst, true), - //.c_import => try sema.zirCImport(block, inst), - //.call => try sema.zirCall(block, inst, .auto, false), - //.call_chkused => try sema.zirCall(block, inst, .auto, true), - //.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), - //.call_nosuspend => try 
sema.zirCall(block, inst, .no_async, false), - //.call_async => try sema.zirCall(block, inst, .async_kw, false), + .alloc => try sema.zirAlloc(block, inst), + .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), + .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), + .alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), + .alloc_mut => try sema.zirAllocMut(block, inst), + .alloc_comptime => try sema.zirAllocComptime(block, inst), + .anyframe_type => try sema.zirAnyframeType(block, inst), + .array_cat => try sema.zirArrayCat(block, inst), + .array_mul => try sema.zirArrayMul(block, inst), + .array_type => try sema.zirArrayType(block, inst), + .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), + .vector_type => try sema.zirVectorType(block, inst), + .as => try sema.zirAs(block, inst), + .as_node => try sema.zirAsNode(block, inst), + .bit_and => try sema.zirBitwise(block, inst, .bit_and), + .bit_not => try sema.zirBitNot(block, inst), + .bit_or => try sema.zirBitwise(block, inst, .bit_or), + .bitcast => try sema.zirBitcast(block, inst), + .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), + .block => try sema.zirBlock(block, inst), + .suspend_block => try sema.zirSuspendBlock(block, inst), + .bool_not => try sema.zirBoolNot(block, inst), + .bool_br_and => try sema.zirBoolBr(block, inst, false), + .bool_br_or => try sema.zirBoolBr(block, inst, true), + .c_import => try sema.zirCImport(block, inst), + .call => try sema.zirCall(block, inst, .auto, false), + .call_chkused => try sema.zirCall(block, inst, .auto, true), + .call_compile_time => try sema.zirCall(block, inst, .compile_time, false), + .call_nosuspend => try sema.zirCall(block, inst, .no_async, false), + .call_async => try sema.zirCall(block, inst, .async_kw, false), .cmp_eq => try sema.zirCmp(block, inst, .eq), .cmp_gt => try sema.zirCmp(block, inst, .gt), .cmp_gte => 
try sema.zirCmp(block, inst, .gte), @@ -1957,24 +1957,23 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(child_block.instructions.items[child_block.instructions.items.len - 1].ty.isNoReturn()); + assert(sema.getTypeOf(indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. - const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items); - try parent_block.instructions.appendSlice(gpa, copied_instructions); - return copied_instructions[copied_instructions.len - 1]; + try parent_block.instructions.appendSlice(gpa, child_block.instructions.items); + return indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); } if (merges.results.items.len == 1) { const last_inst_index = child_block.instructions.items.len - 1; const last_inst = child_block.instructions.items[last_inst_index]; - if (last_inst.breakBlock()) |br_block| { + if (sema.getBreakBlock(last_inst)) |br_block| { if (br_block == merges.block_inst) { // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. - const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]); - try parent_block.instructions.appendSlice(gpa, copied_instructions); + const without_break = child_block.instructions.items[0..last_inst_index]; + try parent_block.instructions.appendSlice(gpa, without_break); return merges.results.items[0]; } } @@ -1998,36 +1997,50 @@ fn analyzeBlockBody( // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. 
for (merges.br_list.items) |br| { - if (sema.getTypeOf(br.operand).eql(resolved_ty)) { + const br_operand = sema.air_instructions.items(.data)[br].br.operand; + const br_operand_src = src; + const br_operand_ty = sema.getTypeOf(br_operand); + if (br_operand_ty.eql(resolved_ty)) { // No type coercion needed. continue; } var coerce_block = parent_block.makeSubBlock(); defer coerce_block.instructions.deinit(gpa); - const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br.operand, br.operand.src); + const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src); // If no instructions were produced, such as in the case of a coercion of a // constant value to a new type, we can simply point the br operand to it. if (coerce_block.instructions.items.len == 0) { - br.operand = coerced_operand; + sema.air_instructions.items(.data)[br].br.operand = coerced_operand; continue; } - assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == coerced_operand); - // Here we depend on the br instruction having been over-allocated (if necessary) - // inside zirBreak so that it can be converted into a br_block_flat instruction. - const br_src = br.base.src; - const br_ty = br.base.ty; - const br_block_flat = @ptrCast(*Inst.BrBlockFlat, br); - br_block_flat.* = .{ - .base = .{ - .src = br_src, - .ty = br_ty, - .tag = .br_block_flat, - }, - .block = merges.block_inst, - .body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, coerce_block.instructions.items), - }, - }; + assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == + refToIndex(coerced_operand).?); + + // Convert the br operand to a block. 
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + coerce_block.instructions.items.len); + try sema.air_instructions.ensureUnusedCapacity(gpa, 2); + const sub_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const sub_br_inst = sub_block_inst + 1; + sema.air_instructions.items(.data)[br].br.operand = indexToRef(sub_block_inst); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .block, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(br_operand_ty), + .payload = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(u32, coerce_block.instructions.items.len), + }), + } }, + }); + sema.air_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); + sema.air_extra.appendAssumeCapacity(sub_br_inst); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = sub_block_inst, + .operand = coerced_operand, + } }, + }); } return indexToRef(merges.block_inst); } @@ -2257,10 +2270,11 @@ fn analyzeCall( ensure_result_used: bool, args: []const Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - if (func.ty.zigTypeTag() != .Fn) - return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); + const func_ty = sema.getTypeOf(func); + if (func_ty.zigTypeTag() != .Fn) + return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func_ty}); - const cc = func.ty.fnCallingConvention(); + const cc = func_ty.fnCallingConvention(); if (cc == .Naked) { // TODO add error note: declared here return sema.mod.fail( @@ -2270,8 +2284,8 @@ fn analyzeCall( .{}, ); } - const fn_params_len = func.ty.fnParamLen(); - if (func.ty.fnIsVarArgs()) { + const fn_params_len = func_ty.fnParamLen(); + if (func_ty.fnIsVarArgs()) { assert(cc == .C); if (args.len < fn_params_len) { // TODO add error note: declared here @@ -2310,11 +2324,9 @@ fn analyzeCall( const gpa = sema.gpa; - const ret_type = func.ty.fnReturnType(); - const is_comptime_call = 
block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or - func.ty.fnCallingConvention() == .Inline; + func_ty.fnCallingConvention() == .Inline; const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { @@ -2400,7 +2412,19 @@ fn analyzeCall( break :res result; } else res: { try sema.requireRuntimeBlock(block, call_src); - break :res try block.addCall(call_src, ret_type, func, args); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len + + args.len); + const func_inst = try block.addInst(.{ + .tag = .call, + .data = .{ .pl_op = .{ + .operand = func, + .payload = sema.addExtraAssumeCapacity(Air.Call{ + .args_len = @intCast(u32, args.len), + }), + } }, + }); + sema.appendRefsAssumeCapacity(args); + break :res func_inst; }; if (ensure_result_used) { @@ -8140,3 +8164,17 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { } return result; } + +fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void { + const coerced = @bitCast([]const u32, refs); + sema.air_extra.appendSliceAssumeCapacity(coerced); +} + +fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index { + const air_datas = sema.air_instructions.items(.data); + const air_tags = sema.air_instructions.items(.tag); + switch (air_tags[inst_index]) { + .br => return air_datas[inst_index].br.block_inst, + else => return null, + } +} -- cgit v1.2.3 From 1294ebe1f5eaca1f11d68284d1b96419d53253be Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Jul 2021 22:44:57 -0700 Subject: Sema: AIR memory layout reworking for noreturn instructions --- src/Module.zig | 2 +- src/Sema.zig | 88 ++++++++++++++++++++++++++-------------------------------- 2 files changed, 41 insertions(+), 49 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig 
index 94d8b63744..fb514ccbd2 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1255,7 +1255,7 @@ pub const Scope = struct { pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref { return block.addInst(.{ .tag = tag, - .data = .no_op, + .data = .{ .no_op = {} }, }); } diff --git a/src/Sema.zig b/src/Sema.zig index b4e8cd5af5..d33d5bd49b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -381,20 +381,20 @@ pub fn analyzeBody( .sub => try sema.zirArithmetic(block, inst), .subwrap => try sema.zirArithmetic(block, inst), - //// Instructions that we know to *always* be noreturn based solely on their tag. - //// These functions match the return type of analyzeBody so that we can - //// tail call them here. - //.break_inline => return inst, - //.condbr => return sema.zirCondbr(block, inst), - //.@"break" => return sema.zirBreak(block, inst), - //.compile_error => return sema.zirCompileError(block, inst), - //.ret_coerce => return sema.zirRetCoerce(block, inst, true), - //.ret_node => return sema.zirRetNode(block, inst), - //.ret_err_value => return sema.zirRetErrValue(block, inst), - //.@"unreachable" => return sema.zirUnreachable(block, inst), - //.repeat => return sema.zirRepeat(block, inst), - //.panic => return sema.zirPanic(block, inst), - //// zig fmt: on + // Instructions that we know to *always* be noreturn based solely on their tag. + // These functions match the return type of analyzeBody so that we can + // tail call them here. 
+ .break_inline => return inst, + .condbr => return sema.zirCondbr(block, inst), + .@"break" => return sema.zirBreak(block, inst), + .compile_error => return sema.zirCompileError(block, inst), + .ret_coerce => return sema.zirRetCoerce(block, inst, true), + .ret_node => return sema.zirRetNode(block, inst), + .ret_err_value => return sema.zirRetErrValue(block, inst), + .@"unreachable" => return sema.zirUnreachable(block, inst), + .repeat => return sema.zirRepeat(block, inst), + .panic => return sema.zirPanic(block, inst), + // zig fmt: on //// Instructions that we know can *never* be noreturn based solely on //// their tag. We avoid needlessly checking if they are noreturn and @@ -534,7 +534,7 @@ pub fn analyzeBody( return break_inst; } }, - else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), + else => |t| @panic(@tagName(t)), }; if (sema.getTypeOf(air_inst).isNoReturn()) return always_noreturn; @@ -2128,7 +2128,6 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) Compil defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].@"break"; - const src = sema.src; const operand = sema.resolveInst(inst_data.operand); const zir_block = inst_data.block_inst; @@ -2136,26 +2135,9 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) Compil while (true) { if (block.label) |label| { if (label.zir_block == zir_block) { - // Here we add a br instruction, but we over-allocate a little bit - // (if necessary) to make it possible to convert the instruction into - // a br_block_flat instruction later. 
- const br = @ptrCast(*Inst.Br, try sema.arena.alignedAlloc( - u8, - Inst.convertable_br_align, - Inst.convertable_br_size, - )); - br.* = .{ - .base = .{ - .tag = .br, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .operand = operand, - .block = label.merges.block_inst, - }; - try start_block.instructions.append(sema.gpa, &br.base); + const br_ref = try start_block.addBr(label.merges.block_inst, operand); try label.merges.results.append(sema.gpa, operand); - try label.merges.br_list.append(sema.gpa, br); + try label.merges.br_list.append(sema.gpa, refToIndex(br_ref).?); return inst; } } @@ -5391,25 +5373,35 @@ fn zirCondbr( return always_noreturn; } + const gpa = sema.gpa; + + // We'll re-use the sub block to save on memory bandwidth, and yank out the + // instructions array in between using it for the then block and else block. var sub_block = parent_block.makeSubBlock(); sub_block.runtime_loop = null; - sub_block.runtime_cond = cond.src; + sub_block.runtime_cond = cond_src; sub_block.runtime_index += 1; - defer sub_block.instructions.deinit(sema.gpa); + defer sub_block.instructions.deinit(gpa); _ = try sema.analyzeBody(&sub_block, then_body); - const air_then_body: ir.Body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), - }; - - sub_block.instructions.shrinkRetainingCapacity(0); + const true_instructions = sub_block.instructions.toOwnedSlice(gpa); + defer gpa.free(true_instructions); _ = try sema.analyzeBody(&sub_block, else_body); - const air_else_body: ir.Body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), - }; - - _ = try parent_block.addCondBr(src, cond, air_then_body, air_else_body); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + + true_instructions.len + sub_block.instructions.items.len); + _ = try parent_block.addInst(.{ + .tag = .cond_br, + .data = .{ .pl_op = .{ + .operand = cond, + .payload = 
sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = @intCast(u32, true_instructions.len), + .else_body_len = @intCast(u32, sub_block.instructions.items.len), + }), + } }, + }); + sema.air_extra.appendSliceAssumeCapacity(true_instructions); + sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items); return always_noreturn; } @@ -6443,7 +6435,7 @@ fn panicWithMsg( try mod.optionalType(arena, ptr_stack_trace_ty), Value.initTag(.null_value), ); - const args = try arena.create([2]Air.Inst.Index); + const args = try arena.create([2]Air.Inst.Ref); args.* = .{ msg_inst, null_stack_trace }; _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args); return always_noreturn; -- cgit v1.2.3 From eadbee2041bba1cd03b24d8f30161025af8e3590 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Jul 2021 15:52:06 -0700 Subject: stage2: first pass at printing AIR/Liveness to text * some instructions are not implemented yet * fix off-by-1 in Air.getMainBody * Compilation: use `@import("builtin")` rather than `std.builtin` for the values that are different for different build configurations. * Sema: avoid calling `addType` in between air_instructions.ensureUnusedCapacity and corresponding appendAssumeCapacity because it can possibly add an instruction. 
* Value: functions print their names --- BRANCH_TODO | 566 ---------------------------------------------------- src/Air.zig | 4 +- src/Compilation.zig | 9 +- src/Module.zig | 3 +- src/Sema.zig | 3 +- src/print_air.zig | 294 +++++++++++++++++++++++++++ src/value.zig | 2 +- 7 files changed, 307 insertions(+), 574 deletions(-) delete mode 100644 BRANCH_TODO create mode 100644 src/print_air.zig (limited to 'src/Module.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO deleted file mode 100644 index 9055cda307..0000000000 --- a/BRANCH_TODO +++ /dev/null @@ -1,566 +0,0 @@ - * be sure to test debug info of parameters - - - pub fn specialOperandDeaths(self: Inst) bool { - return (self.deaths & (1 << deaths_bits)) != 0; - } - - /// Returns `null` if runtime-known. - /// Should be called by codegen, not by Sema. Sema functions should call - /// `resolvePossiblyUndefinedValue` or `resolveDefinedValue` instead. - /// TODO audit Sema code for violations to the above guidance. - pub fn value(base: *Inst) ?Value { - if (base.ty.onePossibleValue()) |opv| return opv; - - const inst = base.castTag(.constant) orelse return null; - return inst.val; - } - - - -/// For debugging purposes, prints a function representation to stderr. 
-pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void { - const allocator = old_module.gpa; - var ctx: DumpAir = .{ - .allocator = allocator, - .arena = std.heap.ArenaAllocator.init(allocator), - .old_module = &old_module, - .module_fn = module_fn, - .indent = 2, - .inst_table = DumpAir.InstTable.init(allocator), - .partial_inst_table = DumpAir.InstTable.init(allocator), - .const_table = DumpAir.InstTable.init(allocator), - }; - defer ctx.inst_table.deinit(); - defer ctx.partial_inst_table.deinit(); - defer ctx.const_table.deinit(); - defer ctx.arena.deinit(); - - switch (module_fn.state) { - .queued => std.debug.print("(queued)", .{}), - .inline_only => std.debug.print("(inline_only)", .{}), - .in_progress => std.debug.print("(in_progress)", .{}), - .sema_failure => std.debug.print("(sema_failure)", .{}), - .dependency_failure => std.debug.print("(dependency_failure)", .{}), - .success => { - const writer = std.io.getStdErr().writer(); - ctx.dump(module_fn.body, writer) catch @panic("failed to dump AIR"); - }, - } -} - -const DumpAir = struct { - allocator: *std.mem.Allocator, - arena: std.heap.ArenaAllocator, - old_module: *const Module, - module_fn: *Module.Fn, - indent: usize, - inst_table: InstTable, - partial_inst_table: InstTable, - const_table: InstTable, - next_index: usize = 0, - next_partial_index: usize = 0, - next_const_index: usize = 0, - - const InstTable = std.AutoArrayHashMap(*Inst, usize); - - /// TODO: Improve this code to include a stack of Body and store the instructions - /// in there. Now we are putting all the instructions in a function local table, - /// however instructions that are in a Body can be thown away when the Body ends. - fn dump(dtz: *DumpAir, body: Body, writer: std.fs.File.Writer) !void { - // First pass to pre-populate the table so that we can show even invalid references. - // Must iterate the same order we iterate the second time. - // We also look for constants and put them in the const_table. 
- try dtz.fetchInstsAndResolveConsts(body); - - std.debug.print("Module.Function(name={s}):\n", .{dtz.module_fn.owner_decl.name}); - - var it = dtz.const_table.iterator(); - while (it.next()) |entry| { - const constant = entry.key_ptr.*.castTag(.constant).?; - try writer.print(" @{d}: {} = {};\n", .{ - entry.value_ptr.*, constant.base.ty, constant.val, - }); - } - - return dtz.dumpBody(body, writer); - } - - fn fetchInstsAndResolveConsts(dtz: *DumpAir, body: Body) error{OutOfMemory}!void { - for (body.instructions) |inst| { - try dtz.inst_table.put(inst, dtz.next_index); - dtz.next_index += 1; - switch (inst.tag) { - .alloc, - .retvoid, - .unreach, - .breakpoint, - .dbg_stmt, - .arg, - => {}, - - .ref, - .ret, - .bitcast, - .not, - .is_non_null, - .is_non_null_ptr, - .is_null, - .is_null_ptr, - .is_err, - .is_non_err, - .is_err_ptr, - .is_non_err_ptr, - .ptrtoint, - .floatcast, - .intcast, - .load, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - .wrap_errunion_payload, - .wrap_errunion_err, - .unwrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, - => { - const un_op = inst.cast(Inst.UnOp).?; - try dtz.findConst(un_op.operand); - }, - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .store, - .bool_and, - .bool_or, - .bit_and, - .bit_or, - .xor, - => { - const bin_op = inst.cast(Inst.BinOp).?; - try dtz.findConst(bin_op.lhs); - try dtz.findConst(bin_op.rhs); - }, - - .br => { - const br = inst.castTag(.br).?; - try dtz.findConst(&br.block.base); - try dtz.findConst(br.operand); - }, - - .br_block_flat => { - const br_block_flat = inst.castTag(.br_block_flat).?; - try dtz.findConst(&br_block_flat.block.base); - try dtz.fetchInstsAndResolveConsts(br_block_flat.body); - }, - - .br_void => { - const br_void = inst.castTag(.br_void).?; - try dtz.findConst(&br_void.block.base); - }, - - .block => { - const 
block = inst.castTag(.block).?; - try dtz.fetchInstsAndResolveConsts(block.body); - }, - - .condbr => { - const condbr = inst.castTag(.condbr).?; - try dtz.findConst(condbr.condition); - try dtz.fetchInstsAndResolveConsts(condbr.then_body); - try dtz.fetchInstsAndResolveConsts(condbr.else_body); - }, - .switchbr => { - const switchbr = inst.castTag(.switchbr).?; - try dtz.findConst(switchbr.target); - try dtz.fetchInstsAndResolveConsts(switchbr.else_body); - for (switchbr.cases) |case| { - try dtz.fetchInstsAndResolveConsts(case.body); - } - }, - - .loop => { - const loop = inst.castTag(.loop).?; - try dtz.fetchInstsAndResolveConsts(loop.body); - }, - .call => { - const call = inst.castTag(.call).?; - try dtz.findConst(call.func); - for (call.args) |arg| { - try dtz.findConst(arg); - } - }, - .struct_field_ptr => { - const struct_field_ptr = inst.castTag(.struct_field_ptr).?; - try dtz.findConst(struct_field_ptr.struct_ptr); - }, - - // TODO fill out this debug printing - .assembly, - .constant, - .varptr, - => {}, - } - } - } - - fn dumpBody(dtz: *DumpAir, body: Body, writer: std.fs.File.Writer) (std.fs.File.WriteError || error{OutOfMemory})!void { - for (body.instructions) |inst| { - const my_index = dtz.next_partial_index; - try dtz.partial_inst_table.put(inst, my_index); - dtz.next_partial_index += 1; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.print("%{d}: {} = {s}(", .{ - my_index, inst.ty, @tagName(inst.tag), - }); - switch (inst.tag) { - .alloc, - .retvoid, - .unreach, - .breakpoint, - .dbg_stmt, - => try writer.writeAll(")\n"), - - .ref, - .ret, - .bitcast, - .not, - .is_non_null, - .is_non_null_ptr, - .is_null, - .is_null_ptr, - .is_err, - .is_err_ptr, - .is_non_err, - .is_non_err_ptr, - .ptrtoint, - .floatcast, - .intcast, - .load, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - .wrap_errunion_err, - .wrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload, - .unwrap_errunion_payload_ptr, - 
.unwrap_errunion_err_ptr, - => { - const un_op = inst.cast(Inst.UnOp).?; - const kinky = try dtz.writeInst(writer, un_op.operand); - if (kinky != null) { - try writer.writeAll(") // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .store, - .bool_and, - .bool_or, - .bit_and, - .bit_or, - .xor, - => { - const bin_op = inst.cast(Inst.BinOp).?; - - const lhs_kinky = try dtz.writeInst(writer, bin_op.lhs); - try writer.writeAll(", "); - const rhs_kinky = try dtz.writeInst(writer, bin_op.rhs); - - if (lhs_kinky != null or rhs_kinky != null) { - try writer.writeAll(") // Instruction does not dominate all uses!"); - if (lhs_kinky) |lhs| { - try writer.print(" %{d}", .{lhs}); - } - if (rhs_kinky) |rhs| { - try writer.print(" %{d}", .{rhs}); - } - try writer.writeAll("\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .arg => { - const arg = inst.castTag(.arg).?; - try writer.print("{s})\n", .{arg.name}); - }, - - .br => { - const br = inst.castTag(.br).?; - - const lhs_kinky = try dtz.writeInst(writer, &br.block.base); - try writer.writeAll(", "); - const rhs_kinky = try dtz.writeInst(writer, br.operand); - - if (lhs_kinky != null or rhs_kinky != null) { - try writer.writeAll(") // Instruction does not dominate all uses!"); - if (lhs_kinky) |lhs| { - try writer.print(" %{d}", .{lhs}); - } - if (rhs_kinky) |rhs| { - try writer.print(" %{d}", .{rhs}); - } - try writer.writeAll("\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .br_block_flat => { - const br_block_flat = inst.castTag(.br_block_flat).?; - const block_kinky = try dtz.writeInst(writer, &br_block_flat.block.base); - if (block_kinky != null) { - try writer.writeAll(", { // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(", {\n"); - } - - const old_indent = dtz.indent; - dtz.indent += 
2; - try dtz.dumpBody(br_block_flat.body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll("})\n"); - }, - - .br_void => { - const br_void = inst.castTag(.br_void).?; - const kinky = try dtz.writeInst(writer, &br_void.block.base); - if (kinky) |_| { - try writer.writeAll(") // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .block => { - const block = inst.castTag(.block).?; - - try writer.writeAll("{\n"); - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(block.body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll("})\n"); - }, - - .condbr => { - const condbr = inst.castTag(.condbr).?; - - const condition_kinky = try dtz.writeInst(writer, condbr.condition); - if (condition_kinky != null) { - try writer.writeAll(", { // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(", {\n"); - } - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(condbr.then_body, writer); - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("}, {\n"); - - try dtz.dumpBody(condbr.else_body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("})\n"); - }, - - .switchbr => { - const switchbr = inst.castTag(.switchbr).?; - - const condition_kinky = try dtz.writeInst(writer, switchbr.target); - if (condition_kinky != null) { - try writer.writeAll(", { // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(", {\n"); - } - const old_indent = dtz.indent; - - if (switchbr.else_body.instructions.len != 0) { - dtz.indent += 2; - try dtz.dumpBody(switchbr.else_body, writer); - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("}, {\n"); - dtz.indent = old_indent; - } - for (switchbr.cases) |case| { - dtz.indent += 2; - try dtz.dumpBody(case.body, 
writer); - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("}, {\n"); - dtz.indent = old_indent; - } - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("})\n"); - }, - - .loop => { - const loop = inst.castTag(.loop).?; - - try writer.writeAll("{\n"); - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(loop.body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll("})\n"); - }, - - .call => { - const call = inst.castTag(.call).?; - - const args_kinky = try dtz.allocator.alloc(?usize, call.args.len); - defer dtz.allocator.free(args_kinky); - std.mem.set(?usize, args_kinky, null); - var any_kinky_args = false; - - const func_kinky = try dtz.writeInst(writer, call.func); - - for (call.args) |arg, i| { - try writer.writeAll(", "); - - args_kinky[i] = try dtz.writeInst(writer, arg); - any_kinky_args = any_kinky_args or args_kinky[i] != null; - } - - if (func_kinky != null or any_kinky_args) { - try writer.writeAll(") // Instruction does not dominate all uses!"); - if (func_kinky) |func_index| { - try writer.print(" %{d}", .{func_index}); - } - for (args_kinky) |arg_kinky| { - if (arg_kinky) |arg_index| { - try writer.print(" %{d}", .{arg_index}); - } - } - try writer.writeAll("\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .struct_field_ptr => { - const struct_field_ptr = inst.castTag(.struct_field_ptr).?; - const kinky = try dtz.writeInst(writer, struct_field_ptr.struct_ptr); - if (kinky != null) { - try writer.print("{d}) // Instruction does not dominate all uses!\n", .{ - struct_field_ptr.field_index, - }); - } else { - try writer.print("{d})\n", .{struct_field_ptr.field_index}); - } - }, - - // TODO fill out this debug printing - .assembly, - .constant, - .varptr, - => { - try writer.writeAll("!TODO!)\n"); - }, - } - } - } - - fn writeInst(dtz: *DumpAir, writer: std.fs.File.Writer, inst: *Inst) !?usize { - if 
(dtz.partial_inst_table.get(inst)) |operand_index| { - try writer.print("%{d}", .{operand_index}); - return null; - } else if (dtz.const_table.get(inst)) |operand_index| { - try writer.print("@{d}", .{operand_index}); - return null; - } else if (dtz.inst_table.get(inst)) |operand_index| { - try writer.print("%{d}", .{operand_index}); - return operand_index; - } else { - try writer.writeAll("!BADREF!"); - return null; - } - } - - fn findConst(dtz: *DumpAir, operand: *Inst) !void { - if (operand.tag == .constant) { - try dtz.const_table.put(operand, dtz.next_const_index); - dtz.next_const_index += 1; - } - } -}; - -pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { - const zir_module = scope.namespace(); - const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); - const loc = std.zig.findLineColumn(source, inst.src); - if (inst.tag == .constant) { - std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{ - inst.ty, - inst.castTag(.constant).?.val, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else if (inst.deaths == 0) { - std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else { - std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - inst.deaths, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } -} - - /// For debugging purposes. 
- pub fn dump(func: *Fn, mod: Module) void { - ir.dumpFn(mod, func); - } - diff --git a/src/Air.zig b/src/Air.zig index 60e6e9933d..a8b38b7659 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -374,8 +374,8 @@ pub const Asm = struct { pub fn getMainBody(air: Air) []const Air.Inst.Index { const body_index = air.extra[@enumToInt(ExtraIndex.main_block)]; - const body_len = air.extra[body_index]; - return air.extra[body_index..][0..body_len]; + const extra = air.extraData(Block, body_index); + return air.extra[extra.end..][0..extra.data.body_len]; } pub fn getType(air: Air, inst: Air.Inst.Index) Type { diff --git a/src/Compilation.zig b/src/Compilation.zig index f241ae6b10..50d1f5760e 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1,6 +1,7 @@ const Compilation = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -907,7 +908,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation { // comptime conditions ((build_options.have_llvm and comptime std.Target.current.isDarwin()) and // runtime conditions - (use_lld and std.builtin.os.tag == .macos and options.target.isDarwin())); + (use_lld and builtin.os.tag == .macos and options.target.isDarwin())); const sysroot = blk: { if (options.sysroot) |sysroot| { @@ -2026,8 +2027,10 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); defer liveness.deinit(gpa); - if (std.builtin.mode == .Debug and self.verbose_air) { - @panic("TODO implement dumping AIR and liveness"); + if (builtin.mode == .Debug and self.verbose_air) { + std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); + @import("print_air.zig").dump(gpa, air, liveness); + std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); } assert(decl.ty.hasCodeGenBits()); diff --git a/src/Module.zig b/src/Module.zig 
index fb514ccbd2..f452824d33 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -3551,7 +3551,8 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { try sema.analyzeFnBody(&inner_block, func.zir_body_inst); // Copy the block into place and mark that as the main block. - try sema.air_extra.ensureUnusedCapacity(gpa, inner_block.instructions.items.len + 1); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + inner_block.instructions.items.len); const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(u32, inner_block.instructions.items.len), }); diff --git a/src/Sema.zig b/src/Sema.zig index ac6755d24e..a144ce1d50 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2028,6 +2028,7 @@ fn analyzeBlockBody( refToIndex(coerced_operand).?); // Convert the br operand to a block. + const br_operand_ty_ref = try sema.addType(br_operand_ty); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + coerce_block.instructions.items.len); try sema.air_instructions.ensureUnusedCapacity(gpa, 2); @@ -2037,7 +2038,7 @@ fn analyzeBlockBody( sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ - .ty = try sema.addType(br_operand_ty), + .ty = br_operand_ty_ref, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(u32, coerce_block.instructions.items.len), }), diff --git a/src/print_air.zig b/src/print_air.zig new file mode 100644 index 0000000000..44c170a078 --- /dev/null +++ b/src/print_air.zig @@ -0,0 +1,294 @@ +const std = @import("std"); +const Allocator = std.mem.Allocator; +const fmtIntSizeBin = std.fmt.fmtIntSizeBin; + +const Module = @import("Module.zig"); +const Value = @import("value.zig").Value; +const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); + +pub fn dump(gpa: *Allocator, air: Air, liveness: Liveness) void { + const instruction_bytes = air.instructions.len * + // Here we don't use 
@sizeOf(Air.Inst.Data) because it would include + // the debug safety tag but we want to measure release size. + (@sizeOf(Air.Inst.Tag) + 8); + const extra_bytes = air.extra.len * @sizeOf(u32); + const values_bytes = air.values.len * @sizeOf(Value); + const variables_bytes = air.variables.len * @sizeOf(*Module.Var); + const tomb_bytes = liveness.tomb_bits.len * @sizeOf(usize); + const liveness_extra_bytes = liveness.extra.len * @sizeOf(u32); + const liveness_special_bytes = liveness.special.count() * 8; + const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes + + values_bytes * variables_bytes + @sizeOf(Liveness) + liveness_extra_bytes + + liveness_special_bytes + tomb_bytes; + + // zig fmt: off + std.debug.print( + \\# Total AIR+Liveness bytes: {} + \\# AIR Instructions: {d} ({}) + \\# AIR Extra Data: {d} ({}) + \\# AIR Values Bytes: {d} ({}) + \\# AIR Variables Bytes: {d} ({}) + \\# Liveness tomb_bits: {} + \\# Liveness Extra Data: {d} ({}) + \\# Liveness special table: {d} ({}) + \\ + , .{ + fmtIntSizeBin(total_bytes), + air.instructions.len, fmtIntSizeBin(instruction_bytes), + air.extra.len, fmtIntSizeBin(extra_bytes), + air.values.len, fmtIntSizeBin(values_bytes), + air.variables.len, fmtIntSizeBin(variables_bytes), + fmtIntSizeBin(tomb_bytes), + liveness.extra.len, fmtIntSizeBin(liveness_extra_bytes), + liveness.special.count(), fmtIntSizeBin(liveness_special_bytes), + }); + // zig fmt: on + var arena = std.heap.ArenaAllocator.init(gpa); + defer arena.deinit(); + + var writer: Writer = .{ + .gpa = gpa, + .arena = &arena.allocator, + .air = air, + .liveness = liveness, + .indent = 0, + }; + const stream = std.io.getStdErr().writer(); + writer.writeAllConstants(stream) catch return; + writer.writeBody(stream, air.getMainBody()) catch return; +} + +const Writer = struct { + gpa: *Allocator, + arena: *Allocator, + air: Air, + liveness: Liveness, + indent: usize, + + fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void { + for 
(w.air.instructions.items(.tag)) |tag, i| { + const inst = @intCast(u32, i); + switch (tag) { + .constant, .const_ty => { + try s.writeByteNTimes(' ', w.indent); + try s.print("%{d} ", .{inst}); + try w.writeInst(s, inst); + try s.writeAll(")\n"); + }, + else => continue, + } + } + } + + fn writeBody(w: *Writer, s: anytype, body: []const Air.Inst.Index) @TypeOf(s).Error!void { + for (body) |inst| { + try s.writeByteNTimes(' ', w.indent); + try s.print("%{d} ", .{inst}); + try w.writeInst(s, inst); + if (w.liveness.isUnused(inst)) { + try s.writeAll(") unused\n"); + } else { + try s.writeAll("\n"); + } + } + } + + fn writeInst(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const tags = w.air.instructions.items(.tag); + const tag = tags[inst]; + try s.print("= {s}(", .{@tagName(tags[inst])}); + switch (tag) { + .arg => try w.writeTyStr(s, inst), + + .add, + .addwrap, + .sub, + .subwrap, + .mul, + .mulwrap, + .div, + .bit_and, + .bit_or, + .xor, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .bool_and, + .bool_or, + .store, + => try w.writeBinOp(s, inst), + + .is_null, + .is_non_null, + .is_null_ptr, + .is_non_null_ptr, + .is_err, + .is_non_err, + .is_err_ptr, + .is_non_err_ptr, + .ptrtoint, + .ret, + => try w.writeUnOp(s, inst), + + .breakpoint, + .unreach, + => try w.writeNoOp(s, inst), + + .const_ty, + .alloc, + => try w.writeTy(s, inst), + + .not, + .bitcast, + .load, + .ref, + .floatcast, + .intcast, + .optional_payload, + .optional_payload_ptr, + .wrap_optional, + .unwrap_errunion_payload, + .unwrap_errunion_err, + .unwrap_errunion_payload_ptr, + .unwrap_errunion_err_ptr, + .wrap_errunion_payload, + .wrap_errunion_err, + => try w.writeTyOp(s, inst), + + .block, + .loop, + => try w.writeBlock(s, inst), + + .struct_field_ptr => try w.writeStructFieldPtr(s, inst), + .varptr => try w.writeVarPtr(s, inst), + .constant => try w.writeConstant(s, inst), + .assembly => try w.writeAssembly(s, inst), + .dbg_stmt => try 
w.writeDbgStmt(s, inst), + .call => try w.writeCall(s, inst), + .br => try w.writeBr(s, inst), + .cond_br => try w.writeCondBr(s, inst), + .switch_br => try w.writeSwitchBr(s, inst), + } + } + + fn writeTyStr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeBinOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeUnOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeNoOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const ty = w.air.instructions.items(.data)[inst].ty; + try s.print("{}", .{ty}); + } + + fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeBlock(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeStructFieldPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeVarPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; + const val = w.air.values[ty_pl.payload]; + try s.print("{}, {}", .{ ty_pl.ty, val }); + } + + fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeDbgStmt(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const dbg_stmt = 
w.air.instructions.items(.data)[inst].dbg_stmt; + try s.print("{d}:{d}", .{ dbg_stmt.line + 1, dbg_stmt.column + 1 }); + } + + fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const pl_op = w.air.instructions.items(.data)[inst].pl_op; + const extra = w.air.extraData(Air.Call, pl_op.payload); + const args = w.air.extra[extra.end..][0..extra.data.args_len]; + try w.writeInstRef(s, pl_op.operand); + try s.writeAll(", ["); + for (args) |arg, i| { + if (i != 0) try s.writeAll(", "); + try w.writeInstRef(s, @intToEnum(Air.Inst.Ref, arg)); + } + try s.writeAll("]"); + } + + fn writeBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeCondBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeInstRef(w: *Writer, s: anytype, inst: Air.Inst.Ref) @TypeOf(s).Error!void { + var i: usize = @enumToInt(inst); + + if (i < Air.Inst.Ref.typed_value_map.len) { + return s.print("@{}", .{inst}); + } + i -= Air.Inst.Ref.typed_value_map.len; + + return w.writeInstIndex(s, @intCast(Air.Inst.Index, i)); + } + + fn writeInstIndex(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + return s.print("%{d}", .{inst}); + } +}; diff --git a/src/value.zig b/src/value.zig index df3a97b09a..abb2ea7b1e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -573,7 +573,7 @@ pub const Value = extern union { .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream), .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .function => return 
out_stream.writeAll("(function)"), + .function => return out_stream.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}), .extern_fn => return out_stream.writeAll("(extern function)"), .variable => return out_stream.writeAll("(variable)"), .ref_val => { -- cgit v1.2.3 From 8082660118bba78de00e1e103e53730a87b2b70f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jul 2021 18:22:18 -0700 Subject: stage2: codegen.zig updated to new AIR memory layout --- src/Air.zig | 143 +++++- src/AstGen.zig | 77 ++-- src/Liveness.zig | 54 ++- src/Module.zig | 4 +- src/Sema.zig | 150 +------ src/Zir.zig | 6 +- src/codegen.zig | 1321 +++++++++++++++++++++++++++++------------------------- 7 files changed, 946 insertions(+), 809 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Air.zig b/src/Air.zig index a8b38b7659..f4c4fa4155 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -13,9 +13,9 @@ const Air = @This(); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. /// The first few indexes are reserved. See `ExtraIndex` for the values. -extra: []u32, -values: []Value, -variables: []*Module.Var, +extra: []const u32, +values: []const Value, +variables: []const *Module.Var, pub const ExtraIndex = enum(u32) { /// Payload index of the main `Block` in the `extra` array. 
@@ -378,22 +378,109 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn getType(air: Air, inst: Air.Inst.Index) Type { - _ = air; - _ = inst; - @panic("TODO Air getType"); +pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[ref_int].ty; + } + return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len)); +} + +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { + const datas = air.instructions.items(.data); + switch (air.instructions.items(.tag)[inst]) { + .arg => return air.getRefType(datas[inst].ty_str.ty), + + .add, + .addwrap, + .sub, + .subwrap, + .mul, + .mulwrap, + .div, + .bit_and, + .bit_or, + .xor, + => return air.typeOf(datas[inst].bin_op.lhs), + + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .is_null, + .is_non_null, + .is_null_ptr, + .is_non_null_ptr, + .is_err, + .is_non_err, + .is_err_ptr, + .is_non_err_ptr, + .bool_and, + .bool_or, + => return Type.initTag(.bool), + + .const_ty => return Type.initTag(.type), + + .alloc => return datas[inst].ty, + + .assembly, + .block, + .constant, + .varptr, + .struct_field_ptr, + => return air.getRefType(datas[inst].ty_pl.ty), + + .not, + .bitcast, + .load, + .ref, + .floatcast, + .intcast, + .optional_payload, + .optional_payload_ptr, + .wrap_optional, + .unwrap_errunion_payload, + .unwrap_errunion_err, + .unwrap_errunion_payload_ptr, + .unwrap_errunion_err_ptr, + .wrap_errunion_payload, + .wrap_errunion_err, + => return air.getRefType(datas[inst].ty_op.ty), + + .loop, + .br, + .cond_br, + .switch_br, + .ret, + .unreach, + => return Type.initTag(.noreturn), + + .breakpoint, + .dbg_stmt, + .store, + => return Type.initTag(.void), + + .ptrtoint => return Type.initTag(.usize), + + .call => { + const callee_ty = air.typeOf(datas[inst].pl_op.operand); + return 
callee_ty.fnReturnType(); + }, + } } pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { - var i: usize = @enumToInt(ref); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + const ref_int = @enumToInt(ref); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[ref_int].val.toType(undefined) catch unreachable; } - i -= Air.Inst.Ref.typed_value_map.len; + const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); - assert(air_tags[i] == .const_ty); - return air_datas[i].ty; + assert(air_tags[inst_index] == .const_ty); + return air_datas[inst_index].ty; } /// Returns the requested data, as well as the new index which is at the start of the @@ -424,3 +511,33 @@ pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void { gpa.free(air.variables); air.* = undefined; } + +const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; + +pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { + return @intToEnum(Air.Inst.Ref, ref_start_index + inst); +} + +pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { + const ref_int = @enumToInt(inst); + if (ref_int >= ref_start_index) { + return ref_int - ref_start_index; + } else { + return null; + } +} + +/// Returns `null` if runtime-known. 
+pub fn value(air: Air, inst: Air.Inst.Ref) ?Value { + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[ref_int].val; + } + const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const air_datas = air.instructions.items(.data); + switch (air.instructions.items(.tag)[inst_index]) { + .constant => return air.values[air_datas[inst_index].ty_pl.payload], + .const_ty => unreachable, + else => return air.typeOfIndex(inst_index).onePossibleValue(), + } +} diff --git a/src/AstGen.zig b/src/AstGen.zig index 1b58b3f2f7..cbd918ecc7 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -6412,37 +6412,12 @@ fn multilineStringLiteral( node: ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - const start = node_datas[node].lhs; - const end = node_datas[node].rhs; - - const gpa = gz.astgen.gpa; - const string_bytes = &gz.astgen.string_bytes; - const str_index = string_bytes.items.len; - - // First line: do not append a newline. - var tok_i = start; - { - const slice = tree.tokenSlice(tok_i); - const line_bytes = slice[2 .. slice.len - 1]; - try string_bytes.appendSlice(gpa, line_bytes); - tok_i += 1; - } - // Following lines: each line prepends a newline. - while (tok_i <= end) : (tok_i += 1) { - const slice = tree.tokenSlice(tok_i); - const line_bytes = slice[2 .. 
slice.len - 1]; - try string_bytes.ensureCapacity(gpa, string_bytes.items.len + line_bytes.len + 1); - string_bytes.appendAssumeCapacity('\n'); - string_bytes.appendSliceAssumeCapacity(line_bytes); - } + const str = try astgen.strLitNodeAsString(node); const result = try gz.add(.{ .tag = .str, .data = .{ .str = .{ - .start = @intCast(u32, str_index), - .len = @intCast(u32, string_bytes.items.len - str_index), + .start = str.index, + .len = str.len, } }, }); return rvalue(gz, rl, result, node); @@ -6620,9 +6595,14 @@ fn asmExpr( const tree = astgen.tree; const main_tokens = tree.nodes.items(.main_token); const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); const token_tags = tree.tokens.items(.tag); - const asm_source = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, full.ast.template); + const asm_source = switch (node_tags[full.ast.template]) { + .string_literal => try astgen.strLitAsString(main_tokens[full.ast.template]), + .multiline_string_literal => try astgen.strLitNodeAsString(full.ast.template), + else => return astgen.failNode(node, "assembly code must use string literal syntax", .{}), + }; // See https://github.com/ziglang/zig/issues/215 and related issues discussing // possible inline assembly improvements. 
Until then here is status quo AstGen @@ -6752,7 +6732,7 @@ fn asmExpr( const result = try gz.addAsm(.{ .node = node, - .asm_source = asm_source, + .asm_source = asm_source.index, .is_volatile = full.volatile_token != null, .output_type_bits = output_type_bits, .outputs = outputs, @@ -8579,6 +8559,41 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !IndexSlice { } } +fn strLitNodeAsString(astgen: *AstGen, node: ast.Node.Index) !IndexSlice { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const start = node_datas[node].lhs; + const end = node_datas[node].rhs; + + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index = string_bytes.items.len; + + // First line: do not append a newline. + var tok_i = start; + { + const slice = tree.tokenSlice(tok_i); + const line_bytes = slice[2 .. slice.len - 1]; + try string_bytes.appendSlice(gpa, line_bytes); + tok_i += 1; + } + // Following lines: each line prepends a newline. + while (tok_i <= end) : (tok_i += 1) { + const slice = tree.tokenSlice(tok_i); + const line_bytes = slice[2 .. slice.len - 1]; + try string_bytes.ensureCapacity(gpa, string_bytes.items.len + line_bytes.len + 1); + string_bytes.appendAssumeCapacity('\n'); + string_bytes.appendSliceAssumeCapacity(line_bytes); + } + const len = string_bytes.items.len - str_index; + try string_bytes.append(gpa, 0); + return IndexSlice{ + .index = @intCast(u32, str_index), + .len = @intCast(u32, len), + }; +} + fn testNameString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !u32 { const gpa = astgen.gpa; const string_bytes = &astgen.string_bytes; @@ -9440,7 +9455,7 @@ const GenZir = struct { args: struct { /// Absolute node index. This function does the conversion to offset from Decl. 
node: ast.Node.Index, - asm_source: Zir.Inst.Ref, + asm_source: u32, output_type_bits: u32, is_volatile: bool, outputs: []const Zir.Inst.Asm.Output, diff --git a/src/Liveness.zig b/src/Liveness.zig index 98af9eb429..79fc0d7325 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -21,7 +21,7 @@ const Log2Int = std.math.Log2Int; /// operand dies after this instruction. /// Instructions which need more data to track liveness have special handling via the /// `special` table. -tomb_bits: []const usize, +tomb_bits: []usize, /// Sparse table of specially handled instructions. The value is an index into the `extra` /// array. The meaning of the data depends on the AIR tag. special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), @@ -98,7 +98,7 @@ pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool return (l.tomb_bits[usize_index] & mask) != 0; } -pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { +pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) void { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << @@ -106,16 +106,40 @@ pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt l.tomb_bits[usize_index] |= mask; } +/// Higher level API. 
+pub const CondBrSlices = struct { + then_deaths: []const Air.Inst.Index, + else_deaths: []const Air.Inst.Index, +}; + +pub fn getCondBr(l: Liveness, inst: Air.Inst.Index) CondBrSlices { + var index: usize = l.special.get(inst) orelse return .{ + .then_deaths = &.{}, + .else_deaths = &.{}, + }; + const then_death_count = l.extra[index]; + index += 1; + const else_death_count = l.extra[index]; + index += 1; + const then_deaths = l.extra[index..][0..then_death_count]; + index += then_death_count; + return .{ + .then_deaths = then_deaths, + .else_deaths = l.extra[index..][0..else_death_count], + }; +} + pub fn deinit(l: *Liveness, gpa: *Allocator) void { gpa.free(l.tomb_bits); gpa.free(l.extra); l.special.deinit(gpa); + l.* = undefined; } /// How many tomb bits per AIR instruction. -const bpi = 4; -const Bpi = std.meta.Int(.unsigned, bpi); -const OperandInt = std.math.Log2Int(Bpi); +pub const bpi = 4; +pub const Bpi = std.meta.Int(.unsigned, bpi); +pub const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. 
const Analysis = struct { @@ -267,14 +291,14 @@ fn analyzeInst( const inst_data = inst_datas[inst].pl_op; const callee = inst_data.operand; const extra = a.air.extraData(Air.Call, inst_data.payload); - const args = a.air.extra[extra.end..][0..extra.data.args_len]; + const args = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]); if (args.len <= bpi - 2) { - var buf: [bpi - 1]Air.Inst.Ref = undefined; + var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); buf[0] = callee; - std.mem.copy(Air.Inst.Ref, buf[1..], @bitCast([]const Air.Inst.Ref, args)); + std.mem.copy(Air.Inst.Ref, buf[1..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function with greater than 2 args"); + @panic("TODO: liveness analysis for function call with greater than 2 args"); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; @@ -285,12 +309,12 @@ fn analyzeInst( const extended = a.zir.instructions.items(.data)[extra.data.zir_index].extended; const outputs_len = @truncate(u5, extended.small); const inputs_len = @truncate(u5, extended.small >> 5); - const outputs = a.air.extra[extra.end..][0..outputs_len]; - const inputs = a.air.extra[extra.end + outputs.len ..][0..inputs_len]; - if (outputs.len + inputs.len <= bpi - 1) { - var buf: [bpi - 1]Air.Inst.Ref = undefined; - std.mem.copy(Air.Inst.Ref, &buf, @bitCast([]const Air.Inst.Ref, outputs)); - std.mem.copy(Air.Inst.Ref, buf[outputs.len..], @bitCast([]const Air.Inst.Ref, inputs)); + const outputs = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..outputs_len]); + const args = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end + outputs.len ..][0..inputs_len]); + if (outputs.len + args.len <= bpi - 1) { + var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); + std.mem.copy(Air.Inst.Ref, &buf, outputs); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); return trackOperands(a, new_set, inst, 
main_tomb, buf); } @panic("TODO: liveness analysis for asm with greater than 3 args"); diff --git a/src/Module.zig b/src/Module.zig index f452824d33..c101221f2e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1309,7 +1309,7 @@ pub const Scope = struct { const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); sema.air_instructions.appendAssumeCapacity(inst); block.instructions.appendAssumeCapacity(result_index); - return Sema.indexToRef(result_index); + return Air.indexToRef(result_index); } }; }; @@ -3533,7 +3533,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { const ty_ref = try sema.addType(param_type); const arg_index = @intCast(u32, sema.air_instructions.len); inner_block.instructions.appendAssumeCapacity(arg_index); - param_inst.* = Sema.indexToRef(arg_index); + param_inst.* = Air.indexToRef(arg_index); try sema.air_instructions.append(gpa, .{ .tag = .arg, .data = .{ diff --git a/src/Sema.zig b/src/Sema.zig index a144ce1d50..777619dc48 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1301,7 +1301,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!A // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air_instructions.items(.data)[refToIndex(air_arg).?].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[Air.refToIndex(air_arg).?].ty_str.str = inst_data.start; return air_arg; } @@ -1389,7 +1389,7 @@ fn zirAllocInferred( // to the block even though it is currently a `.constant`. 
const result = try sema.addConstant(inferred_alloc_ty, Value.initPayload(&val_payload.base)); try sema.requireFunctionBlock(block, src); - try block.instructions.append(sema.gpa, refToIndex(result).?); + try block.instructions.append(sema.gpa, Air.refToIndex(result).?); return result; } @@ -1400,7 +1400,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = sema.resolveInst(inst_data.operand); - const ptr_inst = refToIndex(ptr).?; + const ptr_inst = Air.refToIndex(ptr).?; assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; @@ -1586,7 +1586,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const bin_inst = sema.code.instructions.items(.data)[inst].bin; const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); - const ptr_inst = refToIndex(ptr).?; + const ptr_inst = Air.refToIndex(ptr).?; assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; @@ -1968,13 +1968,13 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(sema.typeOf(indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); + assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. 
try parent_block.instructions.appendSlice(gpa, child_block.instructions.items); - return indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); + return Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); } if (merges.results.items.len == 1) { const last_inst_index = child_block.instructions.items.len - 1; @@ -2025,7 +2025,7 @@ fn analyzeBlockBody( continue; } assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == - refToIndex(coerced_operand).?); + Air.refToIndex(coerced_operand).?); // Convert the br operand to a block. const br_operand_ty_ref = try sema.addType(br_operand_ty); @@ -2034,7 +2034,7 @@ fn analyzeBlockBody( try sema.air_instructions.ensureUnusedCapacity(gpa, 2); const sub_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); const sub_br_inst = sub_block_inst + 1; - sema.air_instructions.items(.data)[br].br.operand = indexToRef(sub_block_inst); + sema.air_instructions.items(.data)[br].br.operand = Air.indexToRef(sub_block_inst); sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ @@ -2054,7 +2054,7 @@ fn analyzeBlockBody( } }, }); } - return indexToRef(merges.block_inst); + return Air.indexToRef(merges.block_inst); } fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { @@ -2149,7 +2149,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) Compil if (label.zir_block == zir_block) { const br_ref = try start_block.addBr(label.merges.block_inst, operand); try label.merges.results.append(sema.gpa, operand); - try label.merges.br_list.append(sema.gpa, refToIndex(br_ref).?); + try label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?); return inst; } } @@ -5310,7 +5310,7 @@ fn zirBoolBr( } } }); try parent_block.instructions.append(gpa, block_inst); - return indexToRef(block_inst); + return Air.indexToRef(block_inst); } fn zirIsNonNull( @@ -7204,7 
+7204,7 @@ fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedVal } }, }); try block.instructions.append(gpa, result_inst); - return indexToRef(result_inst); + return Air.indexToRef(result_inst); } fn analyzeRef( @@ -8021,107 +8021,18 @@ fn enumFieldSrcLoc( } else unreachable; } -/// This is only meant to be called by `typeOf`. -fn analyzeAsTypeInfallible(sema: *Sema, inst: Air.Inst.Ref) Type { - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; - } - i -= Air.Inst.Ref.typed_value_map.len; - assert(sema.air_instructions.items(.tag)[i] == .const_ty); - return sema.air_instructions.items(.data)[i].ty; -} - /// Returns the type of the AIR instruction. fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].ty; - } - i -= Air.Inst.Ref.typed_value_map.len; + return sema.getTmpAir().typeOf(inst); +} - const air_datas = sema.air_instructions.items(.data); - switch (sema.air_instructions.items(.tag)[i]) { - .arg => return sema.analyzeAsTypeInfallible(air_datas[i].ty_str.ty), - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .bit_and, - .bit_or, - .xor, - => return sema.typeOf(air_datas[i].bin_op.lhs), - - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .is_null, - .is_non_null, - .is_null_ptr, - .is_non_null_ptr, - .is_err, - .is_non_err, - .is_err_ptr, - .is_non_err_ptr, - .bool_and, - .bool_or, - => return Type.initTag(.bool), - - .const_ty => return Type.initTag(.type), - - .alloc => return air_datas[i].ty, - - .assembly, - .block, - .constant, - .varptr, - .struct_field_ptr, - => return sema.analyzeAsTypeInfallible(air_datas[i].ty_pl.ty), - - .not, - .bitcast, - .load, - .ref, - .floatcast, - .intcast, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - 
.unwrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, - .wrap_errunion_payload, - .wrap_errunion_err, - => return sema.analyzeAsTypeInfallible(air_datas[i].ty_op.ty), - - .loop, - .br, - .cond_br, - .switch_br, - .ret, - .unreach, - => return Type.initTag(.noreturn), - - .breakpoint, - .dbg_stmt, - .store, - => return Type.initTag(.void), - - .ptrtoint => return Type.initTag(.usize), - - .call => { - const callee_ty = sema.typeOf(air_datas[i].pl_op.operand); - return callee_ty.fnReturnType(); - }, - } +fn getTmpAir(sema: Sema) Air { + return .{ + .instructions = sema.air_instructions.slice(), + .extra = sema.air_extra.items, + .values = sema.air_values.items, + .variables = sema.air_variables.items, + }; } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { @@ -8185,7 +8096,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .tag = .const_ty, .data = .{ .ty = ty }, }); - return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -8207,22 +8118,7 @@ fn addConstant(sema: *Sema, ty: Type, val: Value) CompileError!Air.Inst.Ref { .payload = @intCast(u32, sema.air_values.items.len - 1), } }, }); - return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); -} - -const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; - -pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { - return @intToEnum(Air.Inst.Ref, ref_start_index + inst); -} - -pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { - const ref_int = @enumToInt(inst); - if (ref_int >= ref_start_index) { - return ref_int - ref_start_index; - } else { - return null; - } + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { diff --git a/src/Zir.zig b/src/Zir.zig index 42924817fc..cf349a6a8d 
100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2176,7 +2176,8 @@ pub const Inst = struct { /// 2. clobber: u32 // index into string_bytes (null terminated) for every clobbers_len. pub const Asm = struct { src_node: i32, - asm_source: Ref, + // null-terminated string index + asm_source: u32, /// 1 bit for each outputs_len: whether it uses `-> T` or not. /// 0b0 - operand is a pointer to where to store the output. /// 0b1 - operand is a type; asm expression has the output as the result. @@ -3383,9 +3384,10 @@ const Writer = struct { const inputs_len = @truncate(u5, extended.small >> 5); const clobbers_len = @truncate(u5, extended.small >> 10); const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const asm_source = self.code.nullTerminatedString(extra.data.asm_source); try self.writeFlag(stream, "volatile, ", is_volatile); - try self.writeInstRef(stream, extra.data.asm_source); + try stream.print("\"{}\", ", .{std.zig.fmtEscapes(asm_source)}); try stream.writeAll(", "); var extra_i: usize = extra.end; diff --git a/src/codegen.zig b/src/codegen.zig index 1495b19673..bc22d7ec19 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -3,6 +3,7 @@ const mem = std.mem; const math = std.math; const assert = std.debug.assert; const Air = @import("Air.zig"); +const Zir = @import("Zir.zig"); const Liveness = @import("Liveness.zig"); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; @@ -337,6 +338,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// to place a new stack allocation, it goes here, and then bumps `max_end_stack`. next_stack_offset: u32 = 0, + /// Debug field, used to find bugs in the compiler. + air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init, + + const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {}; + const MCValue = union(enum) { /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc. 
/// TODO Look into deleting this tag and using `dead` instead, since every use @@ -751,24 +757,91 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - for (body) |inst| { - const tomb_bits = self.liveness.getTombBits(inst); - try self.ensureProcessDeathCapacity(@popCount(@TypeOf(tomb_bits), tomb_bits)); + const air_tags = self.air.instructions.items(.tag); - const mcv = try self.genFuncInst(inst); - if (!self.liveness.isUnused(inst)) { - log.debug("{} => {}", .{ inst, mcv }); - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - try branch.inst_table.putNoClobber(self.gpa, inst, mcv); + for (body) |inst| { + const old_air_bookkeeping = self.air_bookkeeping; + try self.ensureProcessDeathCapacity(Liveness.bpi); + + switch (air_tags[inst]) { + // zig fmt: off + .add => try self.airAdd(inst), + .addwrap => try self.airAddWrap(inst), + .sub => try self.airSub(inst), + .subwrap => try self.airSubWrap(inst), + .mul => try self.airMul(inst), + .mulwrap => try self.airMulWrap(inst), + .div => try self.airDiv(inst), + + .cmp_lt => try self.airCmp(inst, .lt), + .cmp_lte => try self.airCmp(inst, .lte), + .cmp_eq => try self.airCmp(inst, .eq), + .cmp_gte => try self.airCmp(inst, .gte), + .cmp_gt => try self.airCmp(inst, .gt), + .cmp_neq => try self.airCmp(inst, .neq), + + .bool_and => try self.airBoolOp(inst), + .bool_or => try self.airBoolOp(inst), + .bit_and => try self.airBitAnd(inst), + .bit_or => try self.airBitOr(inst), + .xor => try self.airXor(inst), + + .alloc => try self.airAlloc(inst), + .arg => try self.airArg(inst), + .assembly => try self.airAsm(inst), + .bitcast => try self.airBitCast(inst), + .block => try self.airBlock(inst), + .br => try self.airBr(inst), + .breakpoint => try self.airBreakpoint(), + .call => try self.airCall(inst), + .cond_br => try self.airCondBr(inst), + .dbg_stmt => try self.airDbgStmt(inst), + .floatcast => try self.airFloatCast(inst), 
+ .intcast => try self.airIntCast(inst), + .is_non_null => try self.airIsNonNull(inst), + .is_non_null_ptr => try self.airIsNonNullPtr(inst), + .is_null => try self.airIsNull(inst), + .is_null_ptr => try self.airIsNullPtr(inst), + .is_non_err => try self.airIsNonErr(inst), + .is_non_err_ptr => try self.airIsNonErrPtr(inst), + .is_err => try self.airIsErr(inst), + .is_err_ptr => try self.airIsErrPtr(inst), + .load => try self.airLoad(inst), + .loop => try self.airLoop(inst), + .not => try self.airNot(inst), + .ptrtoint => try self.airPtrToInt(inst), + .ref => try self.airRef(inst), + .ret => try self.airRet(inst), + .store => try self.airStore(inst), + .struct_field_ptr=> try self.airStructFieldPtr(inst), + .switch_br => try self.airSwitch(inst), + .varptr => try self.airVarPtr(inst), + + .constant => unreachable, // excluded from function bodies + .const_ty => unreachable, // excluded from function bodies + .unreach => self.finishAirBookkeeping(), + + .optional_payload => try self.airOptionalPayload(inst), + .optional_payload_ptr => try self.airOptionalPayloadPtr(inst), + .unwrap_errunion_err => try self.airUnwrapErrErr(inst), + .unwrap_errunion_payload => try self.airUnwrapErrPayload(inst), + .unwrap_errunion_err_ptr => try self.airUnwrapErrErrPtr(inst), + .unwrap_errunion_payload_ptr=> try self.airUnwrapErrPayloadPtr(inst), + + .wrap_optional => try self.airWrapOptional(inst), + .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst), + .wrap_errunion_err => try self.airWrapErrUnionErr(inst), + // zig fmt: on + } + if (std.debug.runtime_safety) { + if (self.air_bookkeeping != old_air_bookkeeping + 1) { + std.debug.panic( + \\in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. + \\Look for a missing call to finishAir or an extra call to it. 
+ \\ + , .{ inst, air_tags[inst] }); + } } - - // TODO inline this logic into every instruction - @panic("TODO rework AIR memory layout codegen for processing deaths"); - //var i: ir.Inst.DeathsBitIndex = 0; - //while (inst.getOperand(i)) |operand| : (i += 1) { - // if (inst.operandDies(i)) - // self.processDeath(operand); - //} } } @@ -833,9 +906,36 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + /// Called when there are no operands, and the instruction is always unreferenced. + fn finishAirBookkeeping(self: *Self) void { + if (std.debug.runtime_safety) { + self.air_bookkeeping += 1; + } + } + + fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { + var tomb_bits = self.liveness.getTombBits(inst); + for (operands) |op| { + const dies = @truncate(u1, tomb_bits) != 0; + tomb_bits >>= 1; + if (!dies) continue; + const op_int = @enumToInt(op); + if (op_int < Air.Inst.Ref.typed_value_map.len) continue; + const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); + self.processDeath(operand); + } + const is_used = @truncate(u1, tomb_bits) == 0; + if (is_used) { + log.debug("{} => {}", .{ inst, result }); + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacityNoClobber(inst, result); + } + self.finishAirBookkeeping(); + } + fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; - try table.ensureCapacity(self.gpa, table.count() + additional_count); + try table.ensureUnusedCapacity(self.gpa, additional_count); } /// Adds a Type to the .debug_info at the current position. 
The bytes will be populated later, @@ -860,83 +960,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genFuncInst(self: *Self, inst: Air.Inst.Index) !MCValue { - const air_tags = self.air.instructions.items(.tag); - switch (air_tags[inst]) { - // zig fmt: off - //.add => return self.genAdd(inst.castTag(.add).?), - //.addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - //.sub => return self.genSub(inst.castTag(.sub).?), - //.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - //.mul => return self.genMul(inst.castTag(.mul).?), - //.mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - //.div => return self.genDiv(inst.castTag(.div).?), - - //.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), - //.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - //.cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), - //.cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - //.cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), - //.cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), - - //.bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - //.bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - //.bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - //.bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - //.xor => return self.genXor(inst.castTag(.xor).?), - - //.alloc => return self.genAlloc(inst.castTag(.alloc).?), - //.arg => return self.genArg(inst.castTag(.arg).?), - //.assembly => return self.genAsm(inst.castTag(.assembly).?), - //.bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - //.block => return self.genBlock(inst.castTag(.block).?), - //.br => return self.genBr(inst.castTag(.br).?), - //.br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - //.breakpoint => return self.genBreakpoint(inst.src), - //.call => return self.genCall(inst.castTag(.call).?), - //.cond_br => return 
self.genCondBr(inst.castTag(.condbr).?), - //.dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - //.floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - //.intcast => return self.genIntCast(inst.castTag(.intcast).?), - //.is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), - //.is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - //.is_null => return self.genIsNull(inst.castTag(.is_null).?), - //.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - //.is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - //.is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - //.is_err => return self.genIsErr(inst.castTag(.is_err).?), - //.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - //.load => return self.genLoad(inst.castTag(.load).?), - //.loop => return self.genLoop(inst.castTag(.loop).?), - //.not => return self.genNot(inst.castTag(.not).?), - //.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - //.ref => return self.genRef(inst.castTag(.ref).?), - //.ret => return self.genRet(inst.castTag(.ret).?), - //.store => return self.genStore(inst.castTag(.store).?), - //.struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - //.switch_br => return self.genSwitch(inst.castTag(.switchbr).?), - //.varptr => return self.genVarPtr(inst.castTag(.varptr).?), - - //.constant => unreachable, // excluded from function bodies - //.unreach => return MCValue{ .unreach = {} }, - - //.optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - //.optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - //.unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - //.unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - 
//.unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - //.unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - - //.wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), - //.wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - //.wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), - - // zig fmt: on - - else => @panic("TODO finish air memory layout branch, more codegen.zig instructions"), - } - } - fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 { if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -954,7 +977,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.getType(inst).elemType(); + const elem_ty = self.air.typeOfIndex(inst).elemType(); const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; @@ -964,7 +987,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = inst.ty; + const elem_ty = self.air.typeOfIndex(inst); const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; @@ -993,7 +1016,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { assert(reg == toCanonicalReg(reg_mcv.register)); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(inst.ty, stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.air.typeOfIndex(inst), 
stack_mcv.stack_offset, reg_mcv); } /// Copies a value to a register without tracking the register. The register is not considered @@ -1010,281 +1033,274 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const reg = try self.register_manager.allocReg(reg_owner, &.{}); - try self.genSetReg(reg_owner.ty, reg, mcv); + try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } - fn genAlloc(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMemPtr(inst); - return MCValue{ .ptr_stack_offset = stack_offset }; + return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); } - fn genFloatCast(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airFloatCast(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genIntCast(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
+ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) - return MCValue.dead; + return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.getType(ty_op.operand); + const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.getType(inst).intInfo(self.target.*); + const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); if (info_a.bits == info_b.bits) - return operand; + return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none }); - switch (arch) { + const result: MCValue = switch (arch) { else => return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genNot(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand = try self.resolveInst(ty_op.operand); - switch (operand) { - .dead => unreachable, - .unreach => unreachable, - .compare_flags_unsigned => |op| return MCValue{ - .compare_flags_unsigned = switch (op) { - .gte => .lt, - .gt => .lte, - .neq => .eq, - .lt => .gte, - .lte => .gt, - .eq => .neq, + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(ty_op.operand); + switch (operand) { + .dead => unreachable, + .unreach => unreachable, + .compare_flags_unsigned => |op| { + const r = MCValue{ + .compare_flags_unsigned = switch (op) { + .gte => .lt, + .gt => .lte, + .neq => .eq, + .lt => .gte, + .lte => .gt, + .eq => .neq, + }, + }; + break :result r; }, - }, - .compare_flags_signed => |op| return MCValue{ - .compare_flags_signed = switch (op) { - .gte => .lt, - .gt => .lte, - .neq => .eq, - .lt => .gte, - .lte => .gt, - .eq => .neq, + .compare_flags_signed => |op| { + const r = MCValue{ + .compare_flags_signed = switch (op) { + .gte => .lt, + .gt => .lte, + .neq => .eq, + .lt => .gte, + .lte => .gt, + .eq => .neq, + }, + }; + break :result r; }, - }, - else => {}, - } + else => {}, + } - switch (arch) { - .x86_64 => { - return try self.genX8664BinMath(inst, ty_op.operand, .bool_true); - }, - .arm, .armeb => { - return try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not); - }, - else => return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}), - } + switch (arch) { + .x86_64 => { + break :result try self.genX8664BinMath(inst, ty_op.operand, .bool_true); + }, + .arm, .armeb => { + break :result try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not); + }, + else => return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}), + } + }; + return self.finishAir(inst, result, .{ 
ty_op.operand, .none, .none }); } - fn genAdd(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airAdd(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .x86_64 => { - return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs); - }, - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add), else => return self.fail("TODO implement add for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genAddWrap(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genMul(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airSub(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .x86_64 => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), - .arm, .armeb => return try self.genArmMul(inst, bin_op.lhs, bin_op.rhs), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub), + else => return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + + fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + + fn airMul(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => try self.genArmMul(inst, bin_op.lhs, bin_op.rhs), else => return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genMulWrap(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genDiv(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement div for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genBitAnd(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and), else => return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genBitOr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or), else => return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genXor(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airXor(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor), else => return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genOptionalPayload(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union error for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } + // *(E!T) -> E - fn genUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } + // *(E!T) -> *T - fn genUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genWrapOptional(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - const optional_ty = self.air.getType(inst); + fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const optional_ty = self.air.typeOfIndex(inst); - // Optional type is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) - return MCValue{ .immediate = 1 }; + // Optional with a zero-bit payload type is just a boolean true + if (optional_ty.abiSize(self.target.*) == 1) + break :result MCValue{ .immediate = 1 }; - switch (arch) { - else => return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}), - } + switch (arch) { + else => return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// T to E!T - fn genWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - - switch (arch) { + fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// E to E!T - fn genWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - - switch (arch) { + fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement wrap errunion error for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genVarPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airVarPtr(self: *Self, inst: Air.Inst.Index) !void { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement varptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ .none, .none, .none }); } - fn reuseOperand(self: *Self, inst: Air.Inst.Index, op_index: u2, mcv: MCValue) bool { + fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { if (!self.liveness.operandDies(inst, op_index)) return false; @@ -1310,12 +1326,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // That makes us responsible for doing the rest of the stuff that processDeath would have done. 
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - branch.inst_table.putAssumeCapacity(inst.getOperand(op_index).?, .dead); + branch.inst_table.putAssumeCapacity(Air.refToIndex(operand).?, .dead); return true; } - fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue) !void { + fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) !void { + const elem_ty = ptr_ty.elemType(); switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1343,31 +1360,37 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genLoad(self: *Self, inst: Air.Inst.Index) !MCValue { - const elem_ty = self.air.getType(inst); - if (!elem_ty.hasCodeGenBits()) - return MCValue.none; - const ptr = try self.resolveInst(inst.operand); - const is_volatile = inst.operand.ty.isVolatilePtr(); - if (self.liveness.isUnused(inst) and !is_volatile) - return MCValue.dead; - const dst_mcv: MCValue = blk: { - if (self.reuseOperand(inst, 0, ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const elem_ty = self.air.typeOfIndex(inst); + const result: MCValue = result: { + if (!elem_ty.hasCodeGenBits()) + break :result MCValue.none; + + const ptr = try self.resolveInst(ty_op.operand); + const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + if (self.liveness.isUnused(inst) and !is_volatile) + break :result MCValue.dead; + + const dst_mcv: MCValue = blk: { + if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
+ break :blk ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + break :result dst_mcv; }; - self.load(dst_mcv, ptr); - return dst_mcv; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genStore(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airStore(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const elem_ty = self.getType(bin_op.rhs); + const elem_ty = self.air.typeOf(bin_op.rhs); switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1397,36 +1420,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("TODO implement storing to MCValue.stack_offset", .{}); }, } - return .none; + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genStructFieldPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - const struct_field_ptr = self.air.instructions.items(.data)[inst].struct_field_ptr; - _ = struct_field_ptr; + fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; + _ = extra; return self.fail("TODO implement codegen struct_field_ptr", .{}); - } - - fn genSub(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .x86_64 => return self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), - .arm, .armeb => return self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub), - else => return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}), - } - } - - fn genSubWrap(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { - else => return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}), - } + //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none }); } fn armOperandShouldBeRegister(self: *Self, mcv: MCValue) !bool { @@ -1461,8 +1463,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const rhs_is_register = rhs == .register; const lhs_should_be_register = try self.armOperandShouldBeRegister(lhs); const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs); - const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs); - const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs); + const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs); + const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs); // Destination must be a register var dst_mcv: MCValue = undefined; @@ -1476,14 +1478,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate 0 or 1 registers if (!rhs_is_register and rhs_should_be_register) { rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // 
Allocate 0 or 1 registers if (!lhs_is_register and lhs_should_be_register) { lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; - branch.inst_table.putAssumeCapacity(op_lhs, lhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1508,7 +1510,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } } else if (lhs_should_be_register) { // RHS is immediate @@ -1605,14 +1607,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Index, op_rhs: Air.Inst.Index) !MCValue { + fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue { const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); const lhs_is_register = lhs == .register; const rhs_is_register = rhs == .register; - const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs); - const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs); + const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs); + const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs); // Destination must be a register // LHS must be a register @@ -1627,14 +1629,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate 0 or 1 registers if (!rhs_is_register) { rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register) { lhs_mcv = MCValue{ .register = try 
self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; - branch.inst_table.putAssumeCapacity(op_lhs, lhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; } else { @@ -1656,7 +1658,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } } @@ -1698,8 +1700,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // as the result MCValue. var dst_mcv: MCValue = undefined; var src_mcv: MCValue = undefined; - var src_inst: Air.Inst.Index = undefined; - if (self.reuseOperand(inst, 0, lhs)) { + var src_inst: Air.Inst.Ref = undefined; + if (self.reuseOperand(inst, op_lhs, 0, lhs)) { // LHS dies; use it as the destination. // Both operands cannot be memory. src_inst = op_rhs; @@ -1710,7 +1712,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { dst_mcv = lhs; src_mcv = rhs; } - } else if (self.reuseOperand(inst, 1, rhs)) { + } else if (self.reuseOperand(inst, op_rhs, 1, rhs)) { // RHS dies; use it as the destination. // Both operands cannot be memory. 
src_inst = op_lhs; @@ -1747,16 +1749,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // Now for step 2, we perform the actual op + const inst_ty = self.air.typeOfIndex(inst); const air_tags = self.air.instructions.items(.tag); switch (air_tags[inst]) { // TODO: Generate wrapping and non-wrapping versions separately - .add, .addwrap => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 0, 0x00), - .bool_or, .bit_or => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 1, 0x08), - .bool_and, .bit_and => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 4, 0x20), - .sub, .subwrap => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 5, 0x28), - .xor, .not => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 6, 0x30), + .add, .addwrap => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 0, 0x00), + .bool_or, .bit_or => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 1, 0x08), + .bool_and, .bit_and => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 4, 0x20), + .sub, .subwrap => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 5, 0x28), + .xor, .not => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 6, 0x30), - .mul, .mulwrap => try self.genX8664Imul(inst.src, inst.ty, dst_mcv, src_mcv), + .mul, .mulwrap => try self.genX8664Imul(inst_ty, dst_mcv, src_mcv), else => unreachable, } @@ -1958,7 +1961,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, .register => |src_reg| { - try self.genX8664ModRMRegToStack(src, dst_ty, off, src_reg, mr + 0x1); + try self.genX8664ModRMRegToStack(dst_ty, off, src_reg, mr + 0x1); }, .immediate => |imm| { _ = imm; @@ -1984,7 +1987,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Performs integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv. 
fn genX8664Imul( self: *Self, - src: LazySrcLoc, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue, @@ -2067,7 +2069,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { encoder.imm32(@intCast(i32, imm)); } else { const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv); - return self.genX8664Imul(src, dst_ty, dst_mcv, MCValue{ .register = src_reg }); + return self.genX8664Imul(dst_ty, dst_mcv, MCValue{ .register = src_reg }); } }, .embedded_in_code, .memory, .stack_offset => { @@ -2163,7 +2165,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void { - const ty_str = self.air.instruction.items(.data)[inst].ty_str; + const ty_str = self.air.instructions.items(.data)[inst].ty_str; const zir = &self.mod_fn.owner_decl.namespace.file_scope.zir; const name = zir.nullTerminatedString(ty_str.str); const name_with_null = name.ptr[0 .. name.len + 1]; @@ -2224,11 +2226,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArg(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airArg(self: *Self, inst: Air.Inst.Index) !void { const arg_index = self.arg_index; self.arg_index += 1; - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const result = self.args[arg_index]; const mcv = switch (arch) { @@ -2252,7 +2254,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.genArgDbgInfo(inst, mcv); if (self.liveness.isUnused(inst)) - return MCValue.dead; + return self.finishAirBookkeeping(); switch (mcv) { .register => |reg| { @@ -2261,10 +2263,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else => {}, } - return mcv; + return self.finishAir(inst, mcv, .{ .none, .none, .none }); } - fn genBreakpoint(self: *Self) !MCValue { + fn airBreakpoint(self: *Self) !void { switch (arch) { .i386, .x86_64 => { try self.code.append(0xcc); // int3 @@ -2280,15 +2282,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => 
return self.fail("TODO implement @breakpoint() for {}", .{self.target.cpu.arch}), } - return .none; + return self.finishAirBookkeeping(); } - fn genCall(self: *Self, inst: Air.Inst.Index) !MCValue { - const pl_op = self.air.instruction.items(.data)[inst].pl_op; - const fn_ty = self.air.getType(pl_op.operand); + fn airCall(self: *Self, inst: Air.Inst.Index) !void { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const fn_ty = self.air.typeOf(pl_op.operand); const callee = pl_op.operand; - const extra = self.air.extraData(Air.Call, inst_data.payload); - const args = self.air.extra[extra.end..][0..extra.data.args_len]; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); var info = try self.resolveCallingConventionValues(fn_ty); defer info.deinit(self); @@ -2300,6 +2302,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64 => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. 
@@ -2307,12 +2310,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .none => continue, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => |off| { // Here we need to emit instructions like this: // mov qword ptr [rsp + stack_offset], x - try self.genSetStack(arg.ty, off, arg_mcv); + try self.genSetStack(arg_ty, off, arg_mcv); }, .ptr_stack_offset => { return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); @@ -2389,6 +2392,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .arm, .armeb => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -2403,7 +2407,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { return self.fail("TODO implement calling with parameters in memory", .{}); @@ -2452,6 +2456,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -2466,7 +2471,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { return self.fail("TODO implement calling with parameters in memory", .{}); @@ -2510,6 +2515,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { for (info.args) |mc_arg, arg_i| { 
const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. @@ -2521,7 +2527,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64, .aarch64 => try self.register_manager.getReg(reg, null), else => unreachable, } - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { // Here we need to emit instructions like this: @@ -2612,6 +2618,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64 => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. @@ -2619,7 +2626,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .none => continue, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { // Here we need to emit instructions like this: @@ -2661,6 +2668,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { for (info.args) |mc_arg, arg_i| { const arg = inst.args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(inst.args[arg_i]); switch (mc_arg) { @@ -2675,7 +2683,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { return self.fail("TODO implement calling with parameters in memory", .{}); @@ -2696,7 +2704,7 @@ fn Function(comptime 
arch: std.Target.Cpu.Arch) type { const got_index = func_payload.data.owner_decl.link.plan9.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; - try self.genSetReg(inst.base.src, Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); + try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32()); } else if (func_value.castTag(.extern_fn)) |_| { @@ -2712,51 +2720,61 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } else unreachable; - switch (info.return_value) { - .register => |reg| { - if (Register.allocIndex(reg) == null) { - // Save function return value in a callee saved register - return try self.copyToNewRegister(inst, info.return_value); - } - }, - else => {}, - } + const result: MCValue = result: { + switch (info.return_value) { + .register => |reg| { + if (Register.allocIndex(reg) == null) { + // Save function return value in a callee saved register + break :result try self.copyToNewRegister(inst, info.return_value); + } + }, + else => {}, + } + break :result info.return_value; + }; - return info.return_value; + if (args.len <= Liveness.bpi - 2) { + var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); + buf[0] = callee; + std.mem.copy(Air.Inst.Ref, buf[1..], args); + return self.finishAir(inst, result, buf); + } + @panic("TODO: codegen for function call with greater than 2 args"); } - fn genRef(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airRef(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.getType(ty_op.operand); - const operand = try self.resolveInst(ty_op.operand); - switch (operand) { - .unreach => unreachable, - .dead => unreachable, - .none => return .none, - - .immediate, - .register, - .ptr_stack_offset, - .ptr_embedded_in_code, - .compare_flags_unsigned, - 
.compare_flags_signed, - => { - const stack_offset = try self.allocMemPtr(inst); - try self.genSetStack(operand_ty, stack_offset, operand); - return MCValue{ .ptr_stack_offset = stack_offset }; - }, + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ty = self.air.typeOf(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); + switch (operand) { + .unreach => unreachable, + .dead => unreachable, + .none => break :result MCValue{ .none = {} }, + + .immediate, + .register, + .ptr_stack_offset, + .ptr_embedded_in_code, + .compare_flags_unsigned, + .compare_flags_signed, + => { + const stack_offset = try self.allocMemPtr(inst); + try self.genSetStack(operand_ty, stack_offset, operand); + break :result MCValue{ .ptr_stack_offset = stack_offset }; + }, - .stack_offset => |offset| return MCValue{ .ptr_stack_offset = offset }, - .embedded_in_code => |offset| return MCValue{ .ptr_embedded_in_code = offset }, - .memory => |vaddr| return MCValue{ .immediate = vaddr }, + .stack_offset => |offset| break :result MCValue{ .ptr_stack_offset = offset }, + .embedded_in_code => |offset| break :result MCValue{ .ptr_embedded_in_code = offset }, + .memory => |vaddr| break :result MCValue{ .immediate = vaddr }, - .undef => return self.fail("TODO implement ref on an undefined value", .{}), - } + .undef => return self.fail("TODO implement ref on an undefined value", .{}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn ret(self: *Self, mcv: MCValue) !MCValue { + fn ret(self: *Self, mcv: MCValue) !void { const ret_ty = self.fn_type.fnReturnType(); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); switch (arch) { @@ -2786,28 +2804,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail("TODO implement return for {}", .{self.target.cpu.arch}), } - return .unreach; } - fn genRet(self: *Self, inst: Air.Inst.Index) !MCValue { - const operand = try 
self.resolveInst(self.air.instructions.items(.data)[inst].un_op); - return self.ret(inst.base.src, operand); + fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); + try self.ret(operand); + return self.finishAirBookkeeping(); } - fn genCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.getType(bin_op.lhs); - assert(ty.eql(self.air.getType(bin_op.rhs))); + if (self.liveness.isUnused(inst)) + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + const ty = self.air.typeOf(bin_op.lhs); + assert(ty.eql(self.air.typeOf(bin_op.rhs))); if (ty.zigTypeTag() == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - switch (arch) { - .x86_64 => { + const result: MCValue = switch (arch) { + .x86_64 => result: { try self.code.ensureCapacity(self.code.items.len + 8); // There are 2 operands, destination and source. 
@@ -2822,12 +2840,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.genX8664BinMathCode(Type.initTag(.bool), dst_mcv, src_mcv, 7, 0x38); const info = ty.intInfo(self.target.*); - return switch (info.signedness) { + break :result switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, - .arm, .armeb => { + .arm, .armeb => result: { const lhs_is_register = lhs == .register; const rhs_is_register = rhs == .register; // lhs should always be a register @@ -2854,39 +2872,40 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; if (lhs_mcv == .register and !lhs_is_register) { try self.genSetReg(ty, lhs_mcv.register, lhs); - branch.inst_table.putAssumeCapacity(bin_op.lhs, lhs); + branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.lhs).?, lhs); } if (rhs_mcv == .register and !rhs_is_register) { try self.genSetReg(ty, rhs_mcv.register, rhs); - branch.inst_table.putAssumeCapacity(bin_op.rhs, rhs); + branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.rhs).?, rhs); } // The destination register is not present in the cmp instruction try self.genArmBinOpCode(undefined, lhs_mcv, rhs_mcv, false, .cmp_eq); const info = ty.intInfo(self.target.*); - return switch (info.signedness) { + break :result switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, else => return self.fail("TODO implement cmp for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genDbgStmt(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; try self.dbgAdvancePCAndLine(dbg_stmt.line, dbg_stmt.column); - assert(self.liveness.isUnused(inst)); - return 
MCValue.dead; + return self.finishAirBookkeeping(); } - fn genCondBr(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); - const extra = self.air.extraData(Air.CondBr, inst_data.payload); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + const liveness_condbr = self.liveness.getCondBr(inst); const reloc: Reloc = switch (arch) { .i386, .x86_64 => reloc: { @@ -2985,9 +3004,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.branch_stack.append(.{}); - const then_deaths = self.liveness.thenDeaths(inst); - try self.ensureProcessDeathCapacity(then_deaths.len); - for (then_deaths) |operand| { + try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len); + for (liveness_condbr.then_deaths) |operand| { self.processDeath(operand); } try self.genBody(then_body); @@ -3010,9 +3028,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const else_branch = self.branch_stack.addOneAssumeCapacity(); else_branch.* = .{}; - const else_deaths = self.liveness.elseDeaths(inst); - try self.ensureProcessDeathCapacity(else_deaths.len); - for (else_deaths) |operand| { + try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len); + for (liveness_condbr.else_deaths) |operand| { self.processDeath(operand); } try self.genBody(else_body); @@ -3026,8 +3043,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // assert that parent_branch.free_registers equals the saved_then_branch.free_registers // rather than assigning it. 
const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2]; - try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() + - else_branch.inst_table.count()); + try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, else_branch.inst_table.count()); const else_slice = else_branch.inst_table.entries.slice(); const else_keys = else_slice.items(.key); @@ -3058,11 +3074,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating else_entry {*} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(else_key.ty, canon_mcv, else_value); + try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } - try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() + - saved_then_branch.inst_table.count()); + try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); const then_slice = saved_then_branch.inst_table.entries.slice(); const then_keys = then_slice.items(.key); const then_values = then_slice.items(.value); @@ -3086,13 +3101,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating then_entry {*} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
- try self.setRegOrMem(then_key.ty, parent_mcv, then_value); + try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } self.branch_stack.pop().deinit(self.gpa); - return MCValue.unreach; + return self.finishAir(inst, .unreach, .{ pl_op.operand, .none, .none }); } fn isNull(self: *Self, operand: MCValue) !MCValue { @@ -3131,107 +3146,115 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genIsNull(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isNull(operand); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isNull(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
+ break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isNull(operand); }; - try self.load(operand, ptr); - return self.isNull(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonNull(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isNonNull(operand); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isNonNull(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
+ break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isNonNull(operand); }; - try self.load(operand, ptr); - return self.isNonNull(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsErr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isErr(operand); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isErr(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
+ break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isErr(operand); }; - try self.load(operand, ptr); - return self.isErr(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonErr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isNonErr(operand); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isNonErr(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
+ break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isNonErr(operand); }; - try self.load(operand, ptr); - return self.isNonErr(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genLoop(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airLoop(self: *Self, inst: Air.Inst.Index) !void { // A loop is a setup to be able to jump back to the beginning. const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); @@ -3239,7 +3262,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const start_index = self.code.items.len; try self.genBody(body); try self.jump(start_index); - return MCValue.unreach; + return self.finishAirBookkeeping(); } /// Send control flow to the `index` of `self.code`. @@ -3274,7 +3297,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genBlock(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airBlock(self: *Self, inst: Air.Inst.Index) !void { try self.blocks.putNoClobber(self.gpa, inst, .{ // A block is a setup to be able to jump to the end. 
.relocs = .{}, @@ -3288,21 +3311,24 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const block_data = self.blocks.getPtr(inst).?; defer block_data.relocs.deinit(self.gpa); - const ty_pl = self.air.instructions.items(.data).ty_pl; + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; try self.genBody(body); for (block_data.relocs.items) |reloc| try self.performReloc(reloc); - return @bitCast(MCValue, block_data.mcv); + const result = @bitCast(MCValue, block_data.mcv); + return self.finishAir(inst, result, .{ .none, .none, .none }); } - fn genSwitch(self: *Self, inst: Air.Inst.Index) !MCValue { - _ = inst; + fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const condition = pl_op.operand; switch (arch) { - else => return self.fail("TODO genSwitch for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO airSwitch for {}", .{self.target.cpu.arch}), } + return self.finishAir(inst, .dead, .{ condition, .none, .none }); } fn performReloc(self: *Self, reloc: Reloc) !void { @@ -3335,54 +3361,49 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genBrBlockFlat(self: *Self, inst: Air.Inst.Index) !MCValue { - try self.genBody(inst.body); - const last = inst.body.instructions[inst.body.instructions.len - 1]; - return self.br(inst.block, last); - } - - fn genBr(self: *Self, inst: Air.Inst.Index) !MCValue { - return self.br(inst.block, inst.operand); + fn airBr(self: *Self, inst: Air.Inst.Index) !void { + const branch = self.air.instructions.items(.data)[inst].br; + try self.br(branch.block_inst, branch.operand); + return self.finishAirBookkeeping(); } - fn genBoolOp(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { const 
bin_op = self.air.instructions.items(.data)[inst].bin_op; const air_tags = self.air.instructions.items(.tag); - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { .x86_64 => switch (air_tags[inst]) { // lhs AND rhs - .bool_and => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .bool_and => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), // lhs OR rhs - .bool_or => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .bool_or => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), else => unreachable, // Not a boolean operation }, .arm, .armeb => switch (air_tags[inst]) { - .bool_and => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and), - .bool_or => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or), + .bool_and => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and), + .bool_or => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or), else => unreachable, // Not a boolean operation }, else => return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Index) !MCValue { + fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (operand.ty.hasCodeGenBits()) { + if (self.air.typeOf(operand).hasCodeGenBits()) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { block_data.mcv = operand_mcv; } else { - try self.setRegOrMem(block.base.ty, block_mcv, operand_mcv); + try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); } - fn brVoid(self: *Self, block: Air.Inst.Index) !MCValue { + fn brVoid(self: *Self, block: Air.Inst.Index) !void { const block_data = 
self.blocks.getPtr(block).?; // Emit a jump with a relocation. It will be patched up after the block ends. @@ -3408,131 +3429,170 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail("TODO implement brvoid for {}", .{self.target.cpu.arch}), } - return .none; } - fn genAsm(self: *Self, inst: Air.Inst.Index) !MCValue { - if (!inst.is_volatile and self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { - .arm, .armeb => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + fn airAsm(self: *Self, inst: Air.Inst.Index) !void { + const air_datas = self.air.instructions.items(.data); + const air_extra = self.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload); + const zir = self.mod_fn.owner_decl.namespace.file_scope.zir; + const extended = zir.instructions.items(.data)[air_extra.data.zir_index].extended; + const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand); + const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source); + const outputs_len = @truncate(u5, extended.small); + const args_len = @truncate(u5, extended.small >> 5); + const clobbers_len = @truncate(u5, extended.small >> 10); + _ = clobbers_len; // TODO honor these + const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end..][0..outputs_len]); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end + outputs.len ..][0..args_len]); + + if (outputs_len > 1) { + return self.fail("TODO implement codegen for asm with more than 1 output", .{}); + } + var extra_i: usize = zir_extra.end; + const output_constraint: ?[]const u8 = out: { + var i: usize = 0; + while (i < outputs_len) : (i += 1) { + const output = zir.extraData(Zir.Inst.Asm.Output, extra_i); + extra_i = output.end; + break :out 
zir.nullTerminatedString(output.data.constraint); + } + break :out null; + }; + + const dead = !is_volatile and self.liveness.isUnused(inst); + const result: MCValue = if (dead) .dead else switch (arch) { + .arm, .armeb => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } - if (mem.eql(u8, inst.asm_source, "svc #0")) { + if (mem.eql(u8, asm_source, "svc #0")) { writeInt(u32, try self.code.addManyAsArray(4), Instruction.svc(.al, 0).toU32()); } else { return self.fail("TODO implement support for more arm assembly instructions", .{}); } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue.none; } }, - .aarch64 => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + .aarch64 => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } - if (mem.eql(u8, inst.asm_source, "svc #0")) { + if (mem.eql(u8, asm_source, "svc #0")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x0).toU32()); - } else if (mem.eql(u8, inst.asm_source, "svc #0x80")) { + } else if (mem.eql(u8, asm_source, "svc #0x80")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x80).toU32()); } else { return self.fail("TODO implement support for more aarch64 assembly instructions", .{}); } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } 
const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue.none; } }, - .riscv64 => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + .riscv64 => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } - if (mem.eql(u8, inst.asm_source, "ecall")) { + if (mem.eql(u8, asm_source, "ecall")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ecall.toU32()); } else { return self.fail("TODO implement support for more riscv64 assembly instructions", .{}); } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue.none; } }, - .x86_64, .i386 => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + .x86_64, .i386 => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } { - var iter = std.mem.tokenize(inst.asm_source, "\n\r"); + var iter = std.mem.tokenize(asm_source, "\n\r"); while (iter.next()) |ins| { if (mem.eql(u8, ins, "syscall")) { try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 }); @@ -3571,20 +3631,27 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue{ .none = {} }; } }, else => return self.fail("TODO implement inline asm support for more architectures", .{}), + }; + if (outputs.len + args.len <= Liveness.bpi - 1) { + var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); + std.mem.copy(Air.Inst.Ref, &buf, outputs); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); + return self.finishAir(inst, result, buf); } + @panic("TODO: codegen for asm with greater than 3 args"); } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. @@ -3761,7 +3828,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, .register => |reg| { - try self.genX8664ModRMRegToStack(src, ty, stack_offset, reg, 0x89); + try self.genX8664ModRMRegToStack(ty, stack_offset, reg, 0x89); }, .memory => |vaddr| { _ = vaddr; @@ -4409,32 +4476,48 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genPtrToInt(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - return self.resolveInst(un_op); + const result = try self.resolveInst(un_op); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genBitCast(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - return self.resolveInst(ty_op.operand); + const result = try self.resolveInst(ty_op.operand); + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn resolveInst(self: *Self, inst: Air.Inst.Index) !MCValue { - // If the type has no 
codegen bits, no need to store it. - if (!inst.ty.hasCodeGenBits()) - return MCValue.none; - - // Constants have static lifetimes, so they are always memoized in the outer most table. - if (inst.castTag(.constant)) |const_inst| { - const branch = &self.branch_stack.items[0]; - const gop = try branch.inst_table.getOrPut(self.gpa, inst); - if (!gop.found_existing) { - gop.value_ptr.* = try self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val }); - } - return gop.value_ptr.*; + fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { + // First section of indexes correspond to a set number of constant values. + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return self.genTypedValue(Air.Inst.Ref.typed_value_map[ref_int]); } - return self.getResolvedInstValue(inst); + // If the type has no codegen bits, no need to store it. + const inst_ty = self.air.typeOf(inst); + if (!inst_ty.hasCodeGenBits()) + return MCValue{ .none = {} }; + + const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + switch (self.air.instructions.items(.tag)[inst_index]) { + .constant => { + // Constants have static lifetimes, so they are always memoized in the outer most table. 
+ const branch = &self.branch_stack.items[0]; + const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); + if (!gop.found_existing) { + const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + gop.value_ptr.* = try self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.values[ty_pl.payload], + }); + } + return gop.value_ptr.*; + }, + .const_ty => unreachable, + else => return self.getResolvedInstValue(inst_index), + } } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { @@ -4454,8 +4537,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// A potential opportunity for future optimization here would be keeping track /// of the fact that the instruction is available both as an immediate /// and as a register. - fn limitImmediateType(self: *Self, inst: Air.Inst.Index, comptime T: type) !MCValue { - const mcv = try self.resolveInst(inst); + fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCValue { + const mcv = try self.resolveInst(operand); const ti = @typeInfo(T).Int; switch (mcv) { .immediate => |imm| { @@ -4470,7 +4553,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return mcv; } - fn genTypedValue(self: *Self, src: LazySrcLoc, typed_value: TypedValue) InnerError!MCValue { + fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); @@ -4480,7 +4563,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .Slice => { var buf: Type.Payload.ElemType = undefined; const ptr_type = typed_value.ty.slicePtrFieldType(&buf); - const ptr_mcv = try self.genTypedValue(src, .{ .ty = ptr_type, .val = typed_value.val }); + const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val }); const slice_len = typed_value.val.sliceLen(); // Codegen can't handle some kinds of indirection. 
If the wrong union field is accessed here it may mean // the Sema code needs to use anonymous Decls or alloca instructions to store data. @@ -4541,7 +4624,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return MCValue{ .immediate = 0 }; var buf: Type.Payload.ElemType = undefined; - return self.genTypedValue(src, .{ + return self.genTypedValue(.{ .ty = typed_value.ty.optionalChild(&buf), .val = typed_value.val, }); -- cgit v1.2.3 From ea902ffe8f5f337b04f25b4efc69599db74d99ce Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 19 Jul 2021 17:35:14 -0700 Subject: Sema: reimplement runtime switch Now supports multiple items pointing to the same body. This is a common pattern even when using a jump table, with multiple cases pointing to the same block of code. In the case of a range specified, the items are moved to branches in the else body. A future improvement may make it possible to have jump table items as well as ranges pointing to the same block of code. --- src/Air.zig | 3 +- src/Module.zig | 6 +- src/Sema.zig | 338 +++++++++++++++++++++++++++++---------------------- src/codegen/wasm.zig | 81 ++++++------ 4 files changed, 240 insertions(+), 188 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Air.zig b/src/Air.zig index 0e19202244..718123818b 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -352,9 +352,10 @@ pub const SwitchBr = struct { else_body_len: u32, /// Trailing: + /// * item: Inst.Ref // for each `items_len`. /// * instruction index for each `body_len`. 
pub const Case = struct { - item: Inst.Ref, + items_len: u32, body_len: u32, }; }; diff --git a/src/Module.zig b/src/Module.zig index c101221f2e..9fadf67c6f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1300,6 +1300,10 @@ pub const Scope = struct { } pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref { + return Air.indexToRef(try block.addInstAsIndex(inst)); + } + + pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index { const sema = block.sema; const gpa = sema.gpa; @@ -1309,7 +1313,7 @@ pub const Scope = struct { const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); sema.air_instructions.appendAssumeCapacity(inst); block.instructions.appendAssumeCapacity(result_index); - return Air.indexToRef(result_index); + return result_index; } }; }; diff --git a/src/Sema.zig b/src/Sema.zig index b3feeb8b1c..b9449157e2 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4170,159 +4170,201 @@ fn analyzeSwitch( try sema.requireRuntimeBlock(block, src); - // TODO when reworking AIR memory layout make multi cases get generated as cases, - // not as part of the "else" block. 
- return mod.fail(&block.base, src, "TODO rework runtime switch Sema", .{}); - //const cases = try sema.arena.alloc(Inst.SwitchBr.Case, scalar_cases_len); - - //var case_block = child_block.makeSubBlock(); - //case_block.runtime_loop = null; - //case_block.runtime_cond = operand.src; - //case_block.runtime_index += 1; - //defer case_block.instructions.deinit(gpa); - - //var extra_index: usize = special.end; - - //var scalar_i: usize = 0; - //while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - // const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - // extra_index += 1; - // const body_len = sema.code.extra[extra_index]; - // extra_index += 1; - // const body = sema.code.extra[extra_index..][0..body_len]; - // extra_index += body_len; + var cases_extra: std.ArrayListUnmanaged(u32) = .{}; + defer cases_extra.deinit(gpa); - // case_block.instructions.shrinkRetainingCapacity(0); - // const item = sema.resolveInst(item_ref); - // // We validate these above; these two calls are guaranteed to succeed. 
- // const item_val = sema.resolveConstValue(&case_block, .unneeded, item) catch unreachable; + try cases_extra.ensureTotalCapacity(gpa, (scalar_cases_len + multi_cases_len) * + @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2); - // _ = try sema.analyzeBody(&case_block, body); + var case_block = child_block.makeSubBlock(); + case_block.runtime_loop = null; + case_block.runtime_cond = operand_src; + case_block.runtime_index += 1; + defer case_block.instructions.deinit(gpa); - // cases[scalar_i] = .{ - // .item = item_val, - // .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) }, - // }; - //} + var extra_index: usize = special.end; - //var first_else_body: Body = undefined; - //var prev_condbr: ?*Inst.CondBr = null; + var scalar_i: usize = 0; + while (scalar_i < scalar_cases_len) : (scalar_i += 1) { + const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + extra_index += 1; + const body_len = sema.code.extra[extra_index]; + extra_index += 1; + const body = sema.code.extra[extra_index..][0..body_len]; + extra_index += body_len; - //var multi_i: usize = 0; - //while (multi_i < multi_cases_len) : (multi_i += 1) { - // const items_len = sema.code.extra[extra_index]; - // extra_index += 1; - // const ranges_len = sema.code.extra[extra_index]; - // extra_index += 1; - // const body_len = sema.code.extra[extra_index]; - // extra_index += 1; - // const items = sema.code.refSlice(extra_index, items_len); - // extra_index += items_len; - - // case_block.instructions.shrinkRetainingCapacity(0); - - // var any_ok: ?Air.Inst.Index = null; - - // for (items) |item_ref| { - // const item = sema.resolveInst(item_ref); - // _ = try sema.resolveConstValue(&child_block, item.src, item); - - // const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); - // if (any_ok) |some| { - // any_ok = try case_block.addBinOp(.bool_or, some, cmp_ok); - // } else { - // any_ok = cmp_ok; - // } - // } - - // var range_i: usize = 
0; - // while (range_i < ranges_len) : (range_i += 1) { - // const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - // extra_index += 1; - // const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - // extra_index += 1; - - // const item_first = sema.resolveInst(first_ref); - // const item_last = sema.resolveInst(last_ref); - - // _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); - // _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); - - // // operand >= first and operand <= last - // const range_first_ok = try case_block.addBinOp( - // .cmp_gte, - // operand, - // item_first, - // ); - // const range_last_ok = try case_block.addBinOp( - // .cmp_lte, - // operand, - // item_last, - // ); - // const range_ok = try case_block.addBinOp( - // .bool_and, - // range_first_ok, - // range_last_ok, - // ); - // if (any_ok) |some| { - // any_ok = try case_block.addBinOp(.bool_or, some, range_ok); - // } else { - // any_ok = range_ok; - // } - // } - - // const new_condbr = try sema.arena.create(Inst.CondBr); - // new_condbr.* = .{ - // .base = .{ - // .tag = .condbr, - // .ty = Type.initTag(.noreturn), - // .src = src, - // }, - // .condition = any_ok.?, - // .then_body = undefined, - // .else_body = undefined, - // }; - // try case_block.instructions.append(gpa, &new_condbr.base); - - // const cond_body: Body = .{ - // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - // }; - - // case_block.instructions.shrinkRetainingCapacity(0); - // const body = sema.code.extra[extra_index..][0..body_len]; - // extra_index += body_len; - // _ = try sema.analyzeBody(&case_block, body); - // new_condbr.then_body = .{ - // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - // }; - // if (prev_condbr) |condbr| { - // condbr.else_body = cond_body; - // } else { - // first_else_body = cond_body; - // } - // prev_condbr = new_condbr; - //} 
- - //const final_else_body: Body = blk: { - // if (special.body.len != 0) { - // case_block.instructions.shrinkRetainingCapacity(0); - // _ = try sema.analyzeBody(&case_block, special.body); - // const else_body: Body = .{ - // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - // }; - // if (prev_condbr) |condbr| { - // condbr.else_body = else_body; - // break :blk first_else_body; - // } else { - // break :blk else_body; - // } - // } else { - // break :blk .{ .instructions = &.{} }; - // } - //}; - - //_ = try child_block.addSwitchBr(src, operand, cases, final_else_body); - //return sema.analyzeBlockBody(block, src, &child_block, merges); + case_block.instructions.shrinkRetainingCapacity(0); + const item = sema.resolveInst(item_ref); + // `item` is already guaranteed to be constant known. + + _ = try sema.analyzeBody(&case_block, body); + + try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); + cases_extra.appendAssumeCapacity(1); // items_len + cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@enumToInt(item)); + cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); + } + + var is_first = true; + var prev_cond_br: Air.Inst.Index = undefined; + var first_else_body: []const Air.Inst.Index = &.{}; + defer gpa.free(first_else_body); + var prev_then_body: []const Air.Inst.Index = &.{}; + defer gpa.free(prev_then_body); + + var multi_i: usize = 0; + while (multi_i < multi_cases_len) : (multi_i += 1) { + const items_len = sema.code.extra[extra_index]; + extra_index += 1; + const ranges_len = sema.code.extra[extra_index]; + extra_index += 1; + const body_len = sema.code.extra[extra_index]; + extra_index += 1; + const items = sema.code.refSlice(extra_index, items_len); + extra_index += items_len; + + case_block.instructions.shrinkRetainingCapacity(0); + + var any_ok: Air.Inst.Ref = .none; + + // If there are any ranges, 
we have to put all the items into the + // else prong. Otherwise, we can take advantage of multiple items + // mapping to the same body. + if (ranges_len == 0) { + const body = sema.code.extra[extra_index..][0..body_len]; + extra_index += body_len; + _ = try sema.analyzeBody(&case_block, body); + + try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + + case_block.instructions.items.len); + + cases_extra.appendAssumeCapacity(1); // items_len + cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + + for (items) |item_ref| { + const item = sema.resolveInst(item_ref); + cases_extra.appendAssumeCapacity(@enumToInt(item)); + } + + cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); + } else { + for (items) |item_ref| { + const item = sema.resolveInst(item_ref); + const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); + if (any_ok != .none) { + any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok); + } else { + any_ok = cmp_ok; + } + } + + var range_i: usize = 0; + while (range_i < ranges_len) : (range_i += 1) { + const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + extra_index += 1; + const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + extra_index += 1; + + const item_first = sema.resolveInst(first_ref); + const item_last = sema.resolveInst(last_ref); + + // operand >= first and operand <= last + const range_first_ok = try case_block.addBinOp( + .cmp_gte, + operand, + item_first, + ); + const range_last_ok = try case_block.addBinOp( + .cmp_lte, + operand, + item_last, + ); + const range_ok = try case_block.addBinOp( + .bool_and, + range_first_ok, + range_last_ok, + ); + if (any_ok != .none) { + any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok); + } else { + any_ok = range_ok; + } + } + + const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{ + .pl_op = .{ + .operand = any_ok, + .payload = undefined, + }, + } }); + var 
cond_body = case_block.instructions.toOwnedSlice(gpa); + defer gpa.free(cond_body); + + case_block.instructions.shrinkRetainingCapacity(0); + const body = sema.code.extra[extra_index..][0..body_len]; + extra_index += body_len; + _ = try sema.analyzeBody(&case_block, body); + + if (is_first) { + is_first = false; + first_else_body = cond_body; + cond_body = &.{}; + } else { + try sema.air_extra.ensureUnusedCapacity( + gpa, + @typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len, + ); + + sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = + sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = @intCast(u32, prev_then_body.len), + .else_body_len = @intCast(u32, cond_body.len), + }); + sema.air_extra.appendSliceAssumeCapacity(prev_then_body); + sema.air_extra.appendSliceAssumeCapacity(cond_body); + } + prev_then_body = case_block.instructions.toOwnedSlice(gpa); + prev_cond_br = new_cond_br; + } + } + + var final_else_body: []const Air.Inst.Index = &.{}; + if (special.body.len != 0) { + case_block.instructions.shrinkRetainingCapacity(0); + _ = try sema.analyzeBody(&case_block, special.body); + + if (is_first) { + final_else_body = case_block.instructions.items; + } else { + try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len + + @typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len); + + sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = + sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = @intCast(u32, prev_then_body.len), + .else_body_len = @intCast(u32, case_block.instructions.items.len), + }); + sema.air_extra.appendSliceAssumeCapacity(prev_then_body); + sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items); + final_else_body = first_else_body; + } + } + + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len + + cases_extra.items.len); + + _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{ + 
.operand = operand, + .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ + .cases_len = @intCast(u32, scalar_cases_len + multi_cases_len), + .else_body_len = @intCast(u32, final_else_body.len), + }), + } } }); + sema.air_extra.appendSliceAssumeCapacity(cases_extra.items); + sema.air_extra.appendSliceAssumeCapacity(final_else_body); + + return sema.analyzeBlockBody(block, src, &child_block, merges); } fn resolveSwitchItemVal( diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index e72140d826..41397f55f4 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -1282,44 +1282,49 @@ pub const Context = struct { // result type is always 'noreturn' const blocktype = wasm.block_empty; - const signedness: std.builtin.Signedness = blk: { - // by default we tell the operand type is unsigned (i.e. bools and enum values) - if (target_ty.zigTypeTag() != .Int) break :blk .unsigned; - - // incase of an actual integer, we emit the correct signedness - break :blk target_ty.intInfo(self.target).signedness; - }; - for (cases) |case_idx| { - const case = self.air.extraData(Air.SwitchBr.Case, case_idx); - const case_body = self.air.extra[case.end..][0..case.data.body_len]; - - // create a block for each case, when the condition does not match we break out of it - try self.startBlock(.block, blocktype, null); - try self.emitWValue(target); - - const val = self.air.value(case.data.item).?; - try self.emitConstant(val, target_ty); - const opcode = buildOpcode(.{ - .valtype1 = valtype, - .op = .ne, // not equal because we jump out the block if it does not match the condition - .signedness = signedness, - }); - try self.code.append(wasm.opcode(opcode)); - try self.code.append(wasm.opcode(.br_if)); - try leb.writeULEB128(self.code.writer(), @as(u32, 0)); - - // emit our block code - try self.genBody(case_body); - - // end the block we created earlier - try self.endBlock(); - } - - // finally, emit the else case if it exists. 
Here we will not have to - // check for a condition, so also no need to emit a block. - try self.genBody(else_body); - - return .none; + _ = valtype; + _ = blocktype; + _ = target; + _ = else_body; + return self.fail("TODO implement wasm codegen for switch", .{}); + //const signedness: std.builtin.Signedness = blk: { + // // by default we tell the operand type is unsigned (i.e. bools and enum values) + // if (target_ty.zigTypeTag() != .Int) break :blk .unsigned; + + // // incase of an actual integer, we emit the correct signedness + // break :blk target_ty.intInfo(self.target).signedness; + //}; + //for (cases) |case_idx| { + // const case = self.air.extraData(Air.SwitchBr.Case, case_idx); + // const case_body = self.air.extra[case.end..][0..case.data.body_len]; + + // // create a block for each case, when the condition does not match we break out of it + // try self.startBlock(.block, blocktype, null); + // try self.emitWValue(target); + + // const val = self.air.value(case.data.item).?; + // try self.emitConstant(val, target_ty); + // const opcode = buildOpcode(.{ + // .valtype1 = valtype, + // .op = .ne, // not equal because we jump out the block if it does not match the condition + // .signedness = signedness, + // }); + // try self.code.append(wasm.opcode(opcode)); + // try self.code.append(wasm.opcode(.br_if)); + // try leb.writeULEB128(self.code.writer(), @as(u32, 0)); + + // // emit our block code + // try self.genBody(case_body); + + // // end the block we created earlier + // try self.endBlock(); + //} + + //// finally, emit the else case if it exists. Here we will not have to + //// check for a condition, so also no need to emit a block. 
+ //try self.genBody(else_body); + + //return .none; } fn airIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { -- cgit v1.2.3 From fe14e339458a578657f3890f00d654a15c84422c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 15:22:37 -0700 Subject: stage2: separate work queue item for functions than decls Previously we had codegen_decl for both constant values as well as function bodies. A recent commit updated the linker backends to add updateFunc as a separate function than updateDecl, and now this commit does the same with work queue tasks. The frontend now distinguishes between function pointers and function bodies. --- src/Compilation.zig | 158 ++++++++++++++++++++++++++++++++++------------- src/Module.zig | 174 +++++++++++++++++++++++++++------------------------- 2 files changed, 203 insertions(+), 129 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 50d1f5760e..ea484c2d15 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -169,8 +169,10 @@ pub const CSourceFile = struct { }; const Job = union(enum) { - /// Write the machine code for a Decl to the output file. + /// Write the constant value for a Decl to the output file. codegen_decl: *Module.Decl, + /// Write the machine code for a function to the output file. + codegen_func: *Module.Fn, /// Render the .h file snippet for the Decl. emit_h_decl: *Module.Decl, /// The Decl needs to be analyzed and possibly export itself. 
@@ -2006,54 +2008,56 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor const module = self.bin_file.options.module.?; assert(decl.has_tv); if (decl.val.castTag(.function)) |payload| { - const func = payload.data; + if (decl.owns_tv) { + const func = payload.data; + + var air = switch (func.state) { + .sema_failure, .dependency_failure => continue, + .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { + error.AnalysisFail => { + assert(func.state != .in_progress); + continue; + }, + error.OutOfMemory => return error.OutOfMemory, + }, + .in_progress => unreachable, + .inline_only => unreachable, // don't queue work for this + .success => unreachable, // don't queue it twice + }; + defer air.deinit(gpa); + + log.debug("analyze liveness of {s}", .{decl.name}); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); + defer liveness.deinit(gpa); + + if (builtin.mode == .Debug and self.verbose_air) { + std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); + @import("print_air.zig").dump(gpa, air, liveness); + std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); + } - var air = switch (func.state) { - .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { + assert(decl.ty.hasCodeGenBits()); + + self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - assert(func.state != .in_progress); + decl.analysis = .codegen_failure; continue; }, - error.OutOfMemory => return error.OutOfMemory, - }, - .in_progress => unreachable, - .inline_only => unreachable, // don't queue work for this - .sema_failure, .dependency_failure => continue, - .success => unreachable, // don't queue it twice - }; - defer air.deinit(gpa); - - log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); - defer liveness.deinit(gpa); 
- - if (builtin.mode == .Debug and self.verbose_air) { - std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); - @import("print_air.zig").dump(gpa, air, liveness); - std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); + else => { + try module.failed_decls.ensureUnusedCapacity(gpa, 1); + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + continue; + }, + }; + continue; } - - assert(decl.ty.hasCodeGenBits()); - - self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - continue; - }, - else => { - try module.failed_decls.ensureUnusedCapacity(gpa, 1); - module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - gpa, - decl.srcLoc(), - "unable to codegen: {s}", - .{@errorName(err)}, - )); - decl.analysis = .codegen_failure_retryable; - continue; - }, - }; - continue; } assert(decl.ty.hasCodeGenBits()); @@ -2078,6 +2082,72 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor }; }, }, + .codegen_func => |func| switch (func.owner_decl.analysis) { + .unreferenced => unreachable, + .in_progress => unreachable, + .outdated => unreachable, + + .file_failure, + .sema_failure, + .codegen_failure, + .dependency_failure, + .sema_failure_retryable, + => continue, + + .complete, .codegen_failure_retryable => { + if (build_options.omit_stage2) + @panic("sadly stage2 is omitted from this build to save memory on the CI server"); + switch (func.state) { + .sema_failure, .dependency_failure => continue, + .queued => {}, + .in_progress => unreachable, + .inline_only => unreachable, // don't queue work for this + .success => unreachable, // don't queue it twice + } + + const module = self.bin_file.options.module.?; + const decl 
= func.owner_decl; + + var air = module.analyzeFnBody(decl, func) catch |err| switch (err) { + error.AnalysisFail => { + assert(func.state != .in_progress); + continue; + }, + error.OutOfMemory => return error.OutOfMemory, + }; + defer air.deinit(gpa); + + log.debug("analyze liveness of {s}", .{decl.name}); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); + defer liveness.deinit(gpa); + + if (builtin.mode == .Debug and self.verbose_air) { + std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); + @import("print_air.zig").dump(gpa, air, liveness); + std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); + } + + self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + continue; + }, + else => { + try module.failed_decls.ensureUnusedCapacity(gpa, 1); + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + continue; + }, + }; + continue; + }, + }, .emit_h_decl => |decl| switch (decl.analysis) { .unreferenced => unreachable, .in_progress => unreachable, diff --git a/src/Module.zig b/src/Module.zig index 9fadf67c6f..4930e7846c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2902,6 +2902,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { decl.generation = mod.generation; return false; } + log.debug("semaDecl {*} ({s})", .{ decl, decl.name }); var block_scope: Scope.Block = .{ .parent = null, @@ -2938,106 +2939,109 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); if (decl_tv.val.castTag(.function)) |fn_payload| { - var prev_type_has_bits = false; - var prev_is_inline = false; - var type_changed = true; - - if (decl.has_tv) { - prev_type_has_bits = 
decl.ty.hasCodeGenBits(); - type_changed = !decl.ty.eql(decl_tv.ty); - if (decl.getFunction()) |prev_func| { - prev_is_inline = prev_func.state == .inline_only; + const func = fn_payload.data; + const owns_tv = func.owner_decl == decl; + if (owns_tv) { + var prev_type_has_bits = false; + var prev_is_inline = false; + var type_changed = true; + + if (decl.has_tv) { + prev_type_has_bits = decl.ty.hasCodeGenBits(); + type_changed = !decl.ty.eql(decl_tv.ty); + if (decl.getFunction()) |prev_func| { + prev_is_inline = prev_func.state == .inline_only; + } + decl.clearValues(gpa); } - decl.clearValues(gpa); - } - - decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); - decl.val = try decl_tv.val.copy(&decl_arena.allocator); - decl.align_val = try align_val.copy(&decl_arena.allocator); - decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); - decl.has_tv = true; - decl.owns_tv = fn_payload.data.owner_decl == decl; - decl_arena_state.* = decl_arena.state; - decl.value_arena = decl_arena_state; - decl.analysis = .complete; - decl.generation = mod.generation; - const is_inline = decl_tv.ty.fnCallingConvention() == .Inline; - if (!is_inline and decl_tv.ty.hasCodeGenBits()) { - // We don't fully codegen the decl until later, but we do need to reserve a global - // offset table index for it. This allows us to codegen decls out of dependency order, - // increasing how many computations can be done in parallel. 
- try mod.comp.bin_file.allocateDeclIndexes(decl); - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - if (type_changed and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); + decl.val = try decl_tv.val.copy(&decl_arena.allocator); + decl.align_val = try align_val.copy(&decl_arena.allocator); + decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.has_tv = true; + decl.owns_tv = owns_tv; + decl_arena_state.* = decl_arena.state; + decl.value_arena = decl_arena_state; + decl.analysis = .complete; + decl.generation = mod.generation; + + const is_inline = decl_tv.ty.fnCallingConvention() == .Inline; + if (!is_inline and decl_tv.ty.hasCodeGenBits()) { + // We don't fully codegen the decl until later, but we do need to reserve a global + // offset table index for it. This allows us to codegen decls out of dependency order, + // increasing how many computations can be done in parallel. + try mod.comp.bin_file.allocateDeclIndexes(decl); + try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); + if (type_changed and mod.emit_h != null) { + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + } + } else if (!prev_is_inline and prev_type_has_bits) { + mod.comp.bin_file.freeDecl(decl); } - } else if (!prev_is_inline and prev_type_has_bits) { - mod.comp.bin_file.freeDecl(decl); - } - if (decl.is_exported) { - const export_src = src; // TODO make this point at `export` token - if (is_inline) { - return mod.fail(&block_scope.base, export_src, "export of inline function", .{}); + if (decl.is_exported) { + const export_src = src; // TODO make this point at `export` token + if (is_inline) { + return mod.fail(&block_scope.base, export_src, "export of inline function", .{}); + } + // The scope needs to have the decl in it. 
+ try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); } - // The scope needs to have the decl in it. - try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); - } - return type_changed or is_inline != prev_is_inline; - } else { - var type_changed = true; - if (decl.has_tv) { - type_changed = !decl.ty.eql(decl_tv.ty); - decl.clearValues(gpa); + return type_changed or is_inline != prev_is_inline; } + } + var type_changed = true; + if (decl.has_tv) { + type_changed = !decl.ty.eql(decl_tv.ty); + decl.clearValues(gpa); + } - decl.owns_tv = false; - var queue_linker_work = false; - if (decl_tv.val.castTag(.variable)) |payload| { - const variable = payload.data; - if (variable.owner_decl == decl) { - decl.owns_tv = true; - queue_linker_work = true; + decl.owns_tv = false; + var queue_linker_work = false; + if (decl_tv.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.owner_decl == decl) { + decl.owns_tv = true; + queue_linker_work = true; - const copied_init = try variable.init.copy(&decl_arena.allocator); - variable.init = copied_init; - } - } else if (decl_tv.val.castTag(.extern_fn)) |payload| { - const owner_decl = payload.data; - if (decl == owner_decl) { - decl.owns_tv = true; - queue_linker_work = true; - } + const copied_init = try variable.init.copy(&decl_arena.allocator); + variable.init = copied_init; } + } else if (decl_tv.val.castTag(.extern_fn)) |payload| { + const owner_decl = payload.data; + if (decl == owner_decl) { + decl.owns_tv = true; + queue_linker_work = true; + } + } - decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); - decl.val = try decl_tv.val.copy(&decl_arena.allocator); - decl.align_val = try align_val.copy(&decl_arena.allocator); - decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); - decl.has_tv = true; - decl_arena_state.* = decl_arena.state; - decl.value_arena = decl_arena_state; - decl.analysis = .complete; - decl.generation = 
mod.generation; - - if (queue_linker_work and decl.ty.hasCodeGenBits()) { - try mod.comp.bin_file.allocateDeclIndexes(decl); - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); + decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); + decl.val = try decl_tv.val.copy(&decl_arena.allocator); + decl.align_val = try align_val.copy(&decl_arena.allocator); + decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.has_tv = true; + decl_arena_state.* = decl_arena.state; + decl.value_arena = decl_arena_state; + decl.analysis = .complete; + decl.generation = mod.generation; - if (type_changed and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); - } - } + if (queue_linker_work and decl.ty.hasCodeGenBits()) { + try mod.comp.bin_file.allocateDeclIndexes(decl); + try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - if (decl.is_exported) { - const export_src = src; // TODO point to the export token - // The scope needs to have the decl in it. - try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); + if (type_changed and mod.emit_h != null) { + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); } + } - return type_changed; + if (decl.is_exported) { + const export_src = src; // TODO point to the export token + // The scope needs to have the decl in it. + try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); } + + return type_changed; } /// Returns the depender's index of the dependee. -- cgit v1.2.3