From 3c3abaf3907e344305620fb4565e7c1acb0a9c88 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 10 Jul 2021 16:24:35 -0700
Subject: stage2: update liveness analysis to new AIR memory layout

It's pretty compact: each AIR instruction takes up only 4 bits, plus a
sparse table for special instructions such as conditional branches,
switch branches, and function calls with more than 2 arguments.
---
 src/Liveness.zig | 457 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 457 insertions(+)
 create mode 100644 src/Liveness.zig

(limited to 'src/Liveness.zig')

diff --git a/src/Liveness.zig b/src/Liveness.zig
new file mode 100644
index 0000000000..828614dcbb
--- /dev/null
+++ b/src/Liveness.zig
@@ -0,0 +1,457 @@
+//! For each AIR instruction, we want to know:
+//! * Is the instruction unreferenced (i.e. it dies immediately)?
+//! * For each of its operands, does the operand die with this instruction
+//!   (i.e. is this the last reference to it)?
+//! Some instructions are special, such as:
+//! * Conditional Branches
+//! * Switch Branches
+const Liveness = @This();
+const std = @import("std");
+const Air = @import("Air.zig");
+const trace = @import("tracy.zig").trace;
+const log = std.log.scoped(.liveness);
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+/// This array is split into sets of 4 bits per AIR instruction.
+/// The MSB (0bX000) is whether the instruction is unreferenced.
+/// The LSB (0b000X) is the first operand, and so on, up to 3 operands. A set bit means the
+/// operand dies after this instruction.
+/// Instructions which need more data to track liveness have special handling via the
+/// `special` table.
+tomb_bits: []const usize,
+/// Sparse table of specially handled instructions. The value is an index into the `extra`
+/// array. The meaning of the data depends on the AIR tag.
+special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
+/// Auxiliary data. The way this data is interpreted is determined contextually.
+extra: []const u32,
+
+/// Trailing is the set of instructions whose lifetimes end at the start of the then branch,
+/// followed by the set of instructions whose lifetimes end at the start of the else branch.
+pub const CondBr = struct {
+    then_death_count: u32,
+    else_death_count: u32,
+};
+
+/// Trailing is:
+/// * For each case in the same order as in the AIR:
+///   - case_death_count: u32
+///   - Air.Inst.Index for each `case_death_count`: set of instructions whose lifetimes
+///     end at the start of this case.
+/// * Air.Inst.Index for each `else_death_count`: set of instructions whose lifetimes
+///   end at the start of the else case.
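+///
+/// As a made-up example: a switch with two cases, where %4 dies on entry to
+/// case 0, nothing dies on entry to case 1, and %7 and %9 die on entry to the
+/// else case, would store `else_death_count = 2` in this struct, followed by
+/// the trailing data: 1, %4, 0, %7, %9.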
+pub const SwitchBr = struct {
+    else_death_count: u32,
+};
+
+pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness {
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    var a: Analysis = .{
+        .gpa = gpa,
+        .air = &air,
+        .table = .{},
+        .tomb_bits = try gpa.alloc(
+            usize,
+            (air.instructions.len * bpi + @bitSizeOf(usize) - 1) / @bitSizeOf(usize),
+        ),
+        .extra = .{},
+        .special = .{},
+    };
+    errdefer gpa.free(a.tomb_bits);
+    errdefer a.special.deinit(gpa);
+    defer a.extra.deinit(gpa);
+    defer a.table.deinit(gpa);
+
+    // The tomb bits are accumulated with `|=`, so they must start zeroed.
+    std.mem.set(usize, a.tomb_bits, 0);
+
+    const main_body = air.getMainBody();
+    try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len));
+    try analyzeWithContext(&a, null, main_body);
+    return Liveness{
+        .tomb_bits = a.tomb_bits,
+        .special = a.special,
+        .extra = a.extra.toOwnedSlice(gpa),
+    };
+}
+
+pub fn deinit(l: *Liveness, gpa: *Allocator) void {
+    gpa.free(l.tomb_bits);
+    gpa.free(l.extra);
+    l.special.deinit(gpa);
+}
+
+/// How many tomb bits per AIR instruction.
+const bpi = 4;
+const Bpi = std.meta.Int(.unsigned, bpi);
+
+/// In-progress data; on successful analysis converted into `Liveness`.
+const Analysis = struct {
+    gpa: *Allocator,
+    air: *const Air,
+    table: std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+    tomb_bits: []usize,
+    special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
+    extra: std.ArrayListUnmanaged(u32),
+
+    fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void {
+        const usize_index = (inst * bpi) / @bitSizeOf(usize);
+        a.tomb_bits[usize_index] |= @as(usize, tomb_bits) <<
+            @intCast(std.math.Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi);
+    }
+
+    fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 {
+        const fields = std.meta.fields(@TypeOf(extra));
+        try a.extra.ensureUnusedCapacity(a.gpa, fields.len);
+        return addExtraAssumeCapacity(a, extra);
+    }
+
+    fn addExtraAssumeCapacity(a: *Analysis, extra: anytype) u32 {
+        const fields = std.meta.fields(@TypeOf(extra));
+        const result = @intCast(u32, a.extra.items.len);
+        inline for (fields) |field| {
+            a.extra.appendAssumeCapacity(switch (field.field_type) {
+                u32 => @field(extra, field.name),
+                else => @compileError("bad field type"),
+            });
+        }
+        return result;
+    }
+};
+
+fn analyzeWithContext(
+    a: *Analysis,
+    new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+    body: []const Air.Inst.Index,
+) Allocator.Error!void {
+    var i: usize = body.len;
+
+    if (new_set) |ns| {
+        // We are only interested in doing this for instructions which are born
+        // before a conditional branch, so after obtaining the new set for
+        // each branch we prune the instructions which were born within.
+        while (i != 0) {
+            i -= 1;
+            const inst = body[i];
+            _ = ns.remove(inst);
+            try analyzeInst(a, new_set, inst);
+        }
+    } else {
+        while (i != 0) {
+            i -= 1;
+            const inst = body[i];
+            try analyzeInst(a, new_set, inst);
+        }
+    }
+}
+
+fn analyzeInst(
+    a: *Analysis,
+    new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+    inst: Air.Inst.Index,
+) Allocator.Error!void {
+    const gpa = a.gpa;
+    const table = &a.table;
+    const inst_tags = a.air.instructions.items(.tag);
+
+    // No tombstone for this instruction means it is never referenced,
+    // and its birth marks its own death. Very metal 🤘
+    const main_tomb = !table.contains(inst);
+
+    switch (inst_tags[inst]) {
+        .add,
+        .addwrap,
+        .sub,
+        .subwrap,
+        .mul,
+        .mulwrap,
+        .div,
+        .bit_and,
+        .bit_or,
+        .xor,
+        .cmp_lt,
+        .cmp_lte,
+        .cmp_eq,
+        .cmp_gte,
+        .cmp_gt,
+        .cmp_neq,
+        .bool_and,
+        .bool_or,
+        .store,
+        => {
+            const o = inst_datas[inst].bin_op;
+            return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none });
+        },
+
+        .alloc,
+        .br,
+        .constant,
+        .breakpoint,
+        .dbg_stmt,
+        .varptr,
+        .unreach,
+        => return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }),
+
+        .not,
+        .bitcast,
+        .load,
+        .ref,
+        .floatcast,
+        .intcast,
+        .optional_payload,
+        .optional_payload_ptr,
+        .wrap_optional,
+        .unwrap_errunion_payload,
+        .unwrap_errunion_err,
+        .unwrap_errunion_payload_ptr,
+        .unwrap_errunion_err_ptr,
+        .wrap_errunion_payload,
+        .wrap_errunion_err,
+        => {
+            const o = inst_datas[inst].ty_op;
+            return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });
+        },
+
+        .is_null,
+        .is_non_null,
+        .is_null_ptr,
+        .is_non_null_ptr,
+        .is_err,
+        .is_non_err,
+        .is_err_ptr,
+        .is_non_err_ptr,
+        .ptrtoint,
+        .ret,
+        => {
+            const operand = inst_datas[inst].un_op;
+            return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
+        },
+
+        .call => {
+            const inst_data = inst_datas[inst].pl_op;
+            const callee = inst_data.operand;
+            const extra = a.air.extraData(Air.Call, inst_data.payload);
+            const args = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]);
+            if (args.len <= bpi - 2) {
+                var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
+                buf[0] = callee;
+                std.mem.copy(Air.Inst.Ref, buf[1..], args);
+                return trackOperands(a, new_set, inst, main_tomb, buf);
+            }
+            @panic("TODO: liveness analysis for function with many args");
+        },
+        .struct_field_ptr => {
+            const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data;
+            return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none });
+        },
+        .block => {
+            const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+            const body = a.air.extra[extra.end..][0..extra.data.body_len];
+            try analyzeWithContext(a, new_set, body);
+            // We let this continue so that it can possibly mark the block as
+            // unreferenced below.
+            return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none });
+        },
+        .loop => {
+            const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+            const body = a.air.extra[extra.end..][0..extra.data.body_len];
+            try analyzeWithContext(a, new_set, body);
+            return; // Loop has no operands and it is always unreferenced.
+        },
+        .cond_br => {
+            // Each death that occurs inside one branch, but not the other, needs
+            // to be added as a death immediately upon entering the other branch.
+            const inst_data = inst_datas[inst].pl_op;
+            const condition = inst_data.operand;
+            const extra = a.air.extraData(Air.CondBr, inst_data.payload);
+            const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len];
+            const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
+
+            var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
+            defer then_table.deinit(gpa);
+            try analyzeWithContext(a, &then_table, then_body);
+
+            // Reset the table back to its state from before the branch.
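+            // The then branch's deaths must not be visible while analyzing the
+            // else branch; they are re-added to the table further down. As a
+            // made-up example: if %5 dies only inside the then branch, it gets
+            // recorded as an entry death of the else branch, so that both paths
+            // agree %5 is dead after the cond_br.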
+            {
+                var it = then_table.keyIterator();
+                while (it.next()) |key| {
+                    assert(table.remove(key.*));
+                }
+            }
+
+            var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
+            defer else_table.deinit(gpa);
+            try analyzeWithContext(a, &else_table, else_body);
+
+            var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa);
+            defer then_entry_deaths.deinit();
+            var else_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa);
+            defer else_entry_deaths.deinit();
+
+            {
+                var it = else_table.keyIterator();
+                while (it.next()) |key| {
+                    const else_death = key.*;
+                    if (!then_table.contains(else_death)) {
+                        try then_entry_deaths.append(else_death);
+                    }
+                }
+            }
+            // This loop is the same, except it's for the then branch, and it additionally
+            // has to put its items back into the table to undo the reset.
+            {
+                var it = then_table.keyIterator();
+                while (it.next()) |key| {
+                    const then_death = key.*;
+                    if (!else_table.contains(then_death)) {
+                        try else_entry_deaths.append(then_death);
+                    }
+                    try table.put(gpa, then_death, {});
+                }
+            }
+            // Now we have to correctly populate new_set.
+            if (new_set) |ns| {
+                try ns.ensureUnusedCapacity(gpa, then_table.count() + else_table.count());
+                var it = then_table.keyIterator();
+                while (it.next()) |key| {
+                    _ = ns.putAssumeCapacity(key.*, {});
+                }
+                it = else_table.keyIterator();
+                while (it.next()) |key| {
+                    _ = ns.putAssumeCapacity(key.*, {});
+                }
+            }
+
+            const then_death_count = @intCast(u32, then_entry_deaths.items.len);
+            const else_death_count = @intCast(u32, else_entry_deaths.items.len);
+
+            try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(CondBr).len +
+                then_death_count + else_death_count);
+            const extra_index = a.addExtraAssumeCapacity(CondBr{
+                .then_death_count = then_death_count,
+                .else_death_count = else_death_count,
+            });
+            a.extra.appendSliceAssumeCapacity(then_entry_deaths.items);
+            a.extra.appendSliceAssumeCapacity(else_entry_deaths.items);
+            try a.special.put(gpa, inst, extra_index);
+
+            // Continue on with the instruction analysis. The following code will find the condition
+            // instruction, and the deaths flag for the CondBr instruction will indicate whether the
+            // condition's lifetime ends immediately before entering any branch.
+            return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none });
+        },
+        .switch_br => {
+            const inst_data = inst_datas[inst].pl_op;
+            const condition = inst_data.operand;
+            const switch_br = a.air.extraData(Air.SwitchBr, inst_data.payload);
+
+            const Table = std.AutoHashMapUnmanaged(Air.Inst.Index, void);
+            const case_tables = try gpa.alloc(Table, switch_br.data.cases_len + 1); // +1 for else
+            defer gpa.free(case_tables);
+
+            std.mem.set(Table, case_tables, .{});
+            defer for (case_tables) |*ct| ct.deinit(gpa);
+
+            var air_extra_index: usize = switch_br.end;
+            for (case_tables[0..switch_br.data.cases_len]) |*case_table| {
+                const case = a.air.extraData(Air.SwitchBr.Case, air_extra_index);
+                const case_body = a.air.extra[case.end..][0..case.data.body_len];
+                air_extra_index = case.end + case_body.len;
+                try analyzeWithContext(a, case_table, case_body);
+
+                // Reset the table back to its state from before the case.
+                var it = case_table.keyIterator();
+                while (it.next()) |key| {
+                    assert(table.remove(key.*));
+                }
+            }
+            { // else
+                const else_table = &case_tables[case_tables.len - 1];
+                const else_body = a.air.extra[air_extra_index..][0..switch_br.data.else_body_len];
+                try analyzeWithContext(a, else_table, else_body);
+
+                // Reset the table back to its state from before the case.
+                var it = else_table.keyIterator();
+                while (it.next()) |key| {
+                    assert(table.remove(key.*));
+                }
+            }
+
+            const List = std.ArrayListUnmanaged(Air.Inst.Index);
+            const case_deaths = try gpa.alloc(List, case_tables.len); // includes else
+            defer gpa.free(case_deaths);
+
+            std.mem.set(List, case_deaths, .{});
+            defer for (case_deaths) |*cd| cd.deinit(gpa);
+
+            var total_deaths: u32 = 0;
+            for (case_tables) |*ct, i| {
+                total_deaths += ct.count();
+                var it = ct.keyIterator();
+                while (it.next()) |key| {
+                    const case_death = key.*;
+                    for (case_tables) |*ct_inner, j| {
+                        if (i == j) continue;
+                        if (!ct_inner.contains(case_death)) {
+                            // instruction is not referenced in this case
+                            try case_deaths[j].append(gpa, case_death);
+                        }
+                    }
+                    // undo resetting the table
+                    try table.put(gpa, case_death, {});
+                }
+            }
+
+            // Now we have to correctly populate new_set.
+            if (new_set) |ns| {
+                try ns.ensureUnusedCapacity(gpa, total_deaths);
+                for (case_tables) |*ct| {
+                    var it = ct.keyIterator();
+                    while (it.next()) |key| {
+                        _ = ns.putAssumeCapacity(key.*, {});
+                    }
+                }
+            }
+
+            const else_death_count = @intCast(u32, case_deaths[case_deaths.len - 1].items.len);
+            const extra_index = try a.addExtra(SwitchBr{
+                .else_death_count = else_death_count,
+            });
+            for (case_deaths[0 .. case_deaths.len - 1]) |*cd| {
+                const case_death_count = @intCast(u32, cd.items.len);
+                try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count);
+                a.extra.appendAssumeCapacity(case_death_count);
+                a.extra.appendSliceAssumeCapacity(cd.items);
+            }
+            a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items);
+            try a.special.put(gpa, inst, extra_index);
+
+            return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none });
+        },
+    }
+}
+
+fn trackOperands(
+    a: *Analysis,
+    new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+    inst: Air.Inst.Index,
+    main_tomb: bool,
+    operands: [bpi - 1]Air.Inst.Ref,
+) Allocator.Error!void {
+    const table = &a.table;
+    const gpa = a.gpa;
+
+    var tomb_bits: Bpi = @boolToInt(main_tomb);
+    var i = operands.len;
+
+    while (i > 0) {
+        i -= 1;
+        tomb_bits <<= 1;
+        const op_int = @enumToInt(operands[i]);
+        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
+        const operand: Air.Inst.Index = op_int - Air.Inst.Ref.typed_value_map.len;
+        const prev = try table.fetchPut(gpa, operand, {});
+        if (prev == null) {
+            // Death.
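+            // Walking the body backwards, the first time an operand enters the
+            // table is its last use, i.e. its death. As a made-up example, final
+            // tomb bits of 0b1001 would mean the instruction itself is
+            // unreferenced and its first operand dies here.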
+            tomb_bits |= 1;
+            if (new_set) |ns| try ns.putNoClobber(gpa, operand, {});
+        }
+    }
+    a.storeTombBits(inst, tomb_bits);
+}
--
cgit v1.2.3


From ef7080aed1a1a4dc54cb837938e462b4e6720734 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 11 Jul 2021 16:32:11 -0700
Subject: stage2: update Liveness, SPIR-V for new AIR memory layout

also do the inline assembly instruction
---
 BRANCH_TODO           |  44 ----
 src/Air.zig           |  60 ++++--
 src/Compilation.zig   |  57 +++--
 src/Liveness.zig      |   1 +
 src/Module.zig        |  36 +++-
 src/Sema.zig          | 563 +++++++++++++++++++++++++-------------------------
 src/codegen/spirv.zig | 411 ++++++++++++++++++------------------
 7 files changed, 595 insertions(+), 577 deletions(-)

(limited to 'src/Liveness.zig')

diff --git a/BRANCH_TODO b/BRANCH_TODO
index 5bc4d2a2f5..3b946edbbd 100644
--- a/BRANCH_TODO
+++ b/BRANCH_TODO
@@ -1,24 +1,6 @@
 * be sure to test debug info of parameters
 
-    /// Each bit represents the index of an `Inst` parameter in the `args` field.
-    /// If a bit is set, it marks the end of the lifetime of the corresponding
-    /// instruction parameter. For example, 0b101 means that the first and
-    /// third `Inst` parameters' lifetimes end after this instruction, and will
-    /// not have any more following references.
-    /// The most significant bit being set means that the instruction itself is
-    /// never referenced, in other words its lifetime ends as soon as it finishes.
-    /// If bit 15 (0b1xxx_xxxx_xxxx_xxxx) is set, it means this instruction itself is unreferenced.
-    /// If bit 14 (0bx1xx_xxxx_xxxx_xxxx) is set, it means this is a special case and the
-    /// lifetimes of operands are encoded elsewhere.
-    deaths: DeathsInt = undefined,
-
-
-    pub const DeathsInt = u16;
-    pub const DeathsBitIndex = std.math.Log2Int(DeathsInt);
-    pub const unreferenced_bit_index = @typeInfo(DeathsInt).Int.bits - 1;
-    pub const deaths_bits = unreferenced_bit_index - 1;
-
     pub fn isUnused(self: Inst) bool {
         return (self.deaths & (1 << unreferenced_bit_index)) != 0;
     }
@@ -115,32 +97,6 @@
 
 
 
-    pub const Assembly = struct {
-        pub const base_tag = Tag.assembly;
-
-        base: Inst,
-        asm_source: []const u8,
-        is_volatile: bool,
-        output_constraint: ?[]const u8,
-        inputs: []const []const u8,
-        clobbers: []const []const u8,
-        args: []const *Inst,
-
-        pub fn operandCount(self: *const Assembly) usize {
-            return self.args.len;
-        }
-        pub fn getOperand(self: *const Assembly, index: usize) ?*Inst {
-            if (index < self.args.len)
-                return self.args[index];
-            return null;
-        }
-    };
-
-    pub const StructFieldPtr = struct {
-        struct_ptr: *Inst,
-        field_index: usize,
-    };
-
 /// For debugging purposes, prints a function representation to stderr.
 pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void {
diff --git a/src/Air.zig b/src/Air.zig
index c57232fba0..112845559d 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -1,5 +1,7 @@
 //! Analyzed Intermediate Representation.
-//! Sema inputs ZIR and outputs AIR.
+//! This data is produced by Sema and consumed by codegen.
+//! Unlike ZIR where there is one instance for an entire source file, each function
+//! gets its own `Air` instance.
 
 const std = @import("std");
 const Value = @import("value.zig").Value;
@@ -27,38 +29,48 @@ pub const Inst = struct {
     data: Data,
 
     pub const Tag = enum(u8) {
+        /// The first N instructions in Air must be one arg instruction per function parameter.
+        /// Uses the `ty` field.
+        arg,
         /// Float or integer addition. For integers, wrapping is undefined behavior.
-        /// Result type is the same as both operands.
+ /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. add, /// Integer addition. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. addwrap, /// Float or integer subtraction. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. sub, /// Integer subtraction. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. subwrap, /// Float or integer multiplication. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. mul, /// Integer multiplication. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. mulwrap, /// Integer or float division. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. div, /// Allocates stack local memory. /// Uses the `ty` field. alloc, - /// TODO + /// Inline assembly. Uses the `ty_pl` field. Payload is `Asm`. assembly, /// Bitwise AND. `&`. /// Result type is the same as both operands. @@ -80,7 +92,7 @@ pub const Inst = struct { /// Uses the `ty_pl` field with payload `Block`. block, /// Return from a block with a result. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `br` field. br, /// Lowers to a hardware trap instruction, or the next best thing. @@ -109,11 +121,11 @@ pub const Inst = struct { /// Uses the `bin_op` field. cmp_neq, /// Conditional branch. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`. cond_br, /// Switch branch. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`. switch_br, /// A comptime-known value. Uses the `ty_pl` field, payload is index of @@ -166,7 +178,7 @@ pub const Inst = struct { load, /// A labeled block of code that loops forever. At the end of the body it is implied /// to repeat; no explicit "repeat" instruction terminates loop bodies. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `ty_pl` field. Payload is `Block`. loop, /// Converts a pointer to its address. Result type is always `usize`. @@ -178,7 +190,7 @@ pub const Inst = struct { /// Uses the `ty_op` field. ref, /// Return a value from a function. 
- /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `un_op` field. ret, /// Returns a pointer to a global variable. @@ -189,7 +201,7 @@ pub const Inst = struct { /// Uses the `bin_op` field. store, /// Indicates the program counter will never get to this instruction. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. unreach, /// Convert from one float type to another. /// Uses the `ty_op` field. @@ -343,6 +355,16 @@ pub const StructField = struct { field_index: u32, }; +/// Trailing: +/// 0. `Ref` for every outputs_len +/// 1. `Ref` for every inputs_len +pub const Asm = struct { + /// Index to the corresponding ZIR instruction. + /// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and + /// clobbers are found via here. + zir_index: u32, +}; + pub fn getMainBody(air: Air) []const Air.Inst.Index { const body_index = air.extra[@enumToInt(ExtraIndex.main_block)]; const body_len = air.extra[body_index]; @@ -369,3 +391,11 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end .end = i, }; } + +pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void { + air.instructions.deinit(gpa); + gpa.free(air.extra); + gpa.free(air.values); + gpa.free(air.variables); + air.* = undefined; +} diff --git a/src/Compilation.zig b/src/Compilation.zig index b9055eceed..74ad7b2aae 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -13,7 +13,7 @@ const target_util = @import("target.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const trace = @import("tracy.zig").trace; -const liveness = @import("liveness.zig"); +const Liveness = @import("Liveness.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const glibc = @import("glibc.zig"); @@ -1922,6 +1922,7 @@ pub fn getCompileLogOutput(self: *Compilation) []const u8 { } pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemory }!void { + const gpa = self.gpa; // If the terminal is dumb, we dont want to show the user all the // output. var progress: std.Progress = .{ .dont_print_on_dumb = true }; @@ -2005,7 +2006,8 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor assert(decl.has_tv); if (decl.val.castTag(.function)) |payload| { const func = payload.data; - switch (func.state) { + + var air = switch (func.state) { .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { error.AnalysisFail => { assert(func.state != .in_progress); @@ -2016,18 +2018,39 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .in_progress => unreachable, .inline_only => unreachable, // don't queue work for this .sema_failure, .dependency_failure => continue, - .success => {}, - } - // Here we tack on additional allocations to the Decl's arena. The allocations - // are lifetime annotations in the ZIR. 
- var decl_arena = decl.value_arena.?.promote(module.gpa); - defer decl.value_arena.?.* = decl_arena.state; + .success => unreachable, // don't queue it twice + }; + defer air.deinit(gpa); + log.debug("analyze liveness of {s}", .{decl.name}); - try liveness.analyze(module.gpa, &decl_arena.allocator, func.body); + var liveness = try Liveness.analyze(gpa, air); + defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { func.dump(module.*); } + + assert(decl.ty.hasCodeGenBits()); + + self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + continue; + }, + else => { + try module.failed_decls.ensureUnusedCapacity(gpa, 1); + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + continue; + }, + }; + continue; } assert(decl.ty.hasCodeGenBits()); @@ -2039,9 +2062,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor continue; }, else => { - try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - module.gpa, + gpa, decl.srcLoc(), "unable to codegen: {s}", .{@errorName(err)}, @@ -2070,7 +2093,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; const emit_h = module.emit_h.?; - _ = try emit_h.decl_table.getOrPut(module.gpa, decl); + _ = try emit_h.decl_table.getOrPut(gpa, decl); const decl_emit_h = decl.getEmitH(module); const fwd_decl = &decl_emit_h.fwd_decl; fwd_decl.shrinkRetainingCapacity(0); @@ -2079,7 +2102,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .module = module, .error_msg = null, .decl = decl, - .fwd_decl = fwd_decl.toManaged(module.gpa), + .fwd_decl = fwd_decl.toManaged(gpa), // we don't want to emit optionals and error unions to headers since they have no ABI .typedefs = undefined, }; @@ -2087,14 +2110,14 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor c_codegen.genHeader(&dg) catch |err| switch (err) { error.AnalysisFail => { - try emit_h.failed_decls.put(module.gpa, decl, dg.error_msg.?); + try emit_h.failed_decls.put(gpa, decl, dg.error_msg.?); continue; }, else => |e| return e, }; fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); - fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); + fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); }, }, .analyze_decl => |decl| { @@ -2111,9 +2134,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; self.bin_file.updateDeclLineNumber(module, decl) catch |err| { - try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - module.gpa, + gpa, decl.srcLoc(), "unable to update line number: {s}", .{@errorName(err)}, diff --git 
a/src/Liveness.zig b/src/Liveness.zig index 828614dcbb..84e2495054 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -150,6 +150,7 @@ fn analyzeInst( const gpa = a.gpa; const table = &a.table; const inst_tags = a.air.instructions.items(.tag); + const inst_datas = a.air.instructions.items(.data); // No tombstone for this instruction means it is never referenced, // and its birth marks its own death. Very metal 🤘 diff --git a/src/Module.zig b/src/Module.zig index 2f1dc0b33b..6273243ee2 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -739,8 +739,6 @@ pub const Union = struct { pub const Fn = struct { /// The Decl that corresponds to the function itself. owner_decl: *Decl, - /// undefined unless analysis state is `success`. - body: ir.Body, /// The ZIR instruction that is a function instruction. Use this to find /// the body. We store this rather than the body directly so that when ZIR /// is regenerated on update(), we can map this to the new corresponding @@ -3585,17 +3583,19 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { mod.gpa.free(kv.value); } -pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { +pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { const tracy = trace(@src()); defer tracy.end(); + const gpa = mod.gpa; + // Use the Decl's arena for function memory. - var arena = decl.value_arena.?.promote(mod.gpa); + var arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try mod.gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); - defer mod.gpa.free(param_inst_list); + const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); + defer gpa.free(param_inst_list); for (param_inst_list) |*param_inst, param_index| { const param_type = fn_ty.fnParamType(param_index); @@ -3615,7 +3615,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { var sema: Sema = .{ .mod = mod, - .gpa = mod.gpa, + .gpa = gpa, .arena = &arena.allocator, .code = zir, .owner_decl = decl, @@ -3626,6 +3626,11 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { }; defer sema.deinit(); + // First few indexes of extra are reserved and set at the end. + const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; + try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); + sema.air_extra.items.len += reserved_count; + var inner_block: Scope.Block = .{ .parent = null, .sema = &sema, @@ -3634,20 +3639,29 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { .inlining = null, .is_comptime = false, }; - defer inner_block.instructions.deinit(mod.gpa); + defer inner_block.instructions.deinit(gpa); // AIR currently requires the arg parameters to be the first N instructions - try inner_block.instructions.appendSlice(mod.gpa, param_inst_list); + try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); try sema.analyzeFnBody(&inner_block, func.zir_body_inst); - const instructions = try arena.allocator.dupe(*ir.Inst, inner_block.instructions.items); + // Copy the block into place and mark that as the main block. 
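+    // (The reserved index slots, including `main_block`, were allocated before
+    // analysis began; this assignment records where the main body starts.)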
+    sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = @intCast(u32, sema.air_extra.items.len);
+    try sema.air_extra.appendSlice(gpa, inner_block.instructions.items);
+
     func.state = .success;
-    func.body = .{ .instructions = instructions };
     log.debug("set {s} to success", .{decl.name});
+
+    return Air{
+        .instructions = sema.air_instructions.toOwnedSlice(),
+        .extra = sema.air_extra.toOwnedSlice(gpa),
+        .values = sema.air_values.toOwnedSlice(gpa),
+        .variables = sema.air_variables.toOwnedSlice(gpa),
+    };
 }
 
 fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
diff --git a/src/Sema.zig b/src/Sema.zig
index 85cb4aa423..b4e10837af 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1,6 +1,6 @@
 //! Semantic analysis of ZIR instructions.
 //! Shared to every Block. Stored on the stack.
-//! State used for compiling a `Zir` into AIR.
+//! State used for compiling a ZIR into AIR.
 //! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions.
 //! Does type checking, comptime control flow, and safety-check generation.
 //! This is the heart of the Zig compiler.
@@ -11,6 +11,10 @@ gpa: *Allocator,
 /// Points to the arena allocator of the Decl.
 arena: *Allocator,
 code: Zir,
+air_instructions: std.MultiArrayList(Air.Inst) = .{},
+air_extra: ArrayListUnmanaged(u32) = .{},
+air_values: ArrayListUnmanaged(Value) = .{},
+air_variables: ArrayListUnmanaged(Module.Var) = .{},
 /// Maps ZIR to AIR.
 inst_map: InstMap = .{},
 /// When analyzing an inline function call, owner_decl is the Decl of the caller
@@ -32,7 +36,7 @@ func: ?*Module.Fn,
 /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function,
 /// > otherwise it is the number of parameters of the function.
 /// > param_count: u32
-param_inst_list: []const *ir.Inst,
+param_inst_list: []const Air.Inst.Index,
 branch_quota: u32 = 1000,
 branch_count: u32 = 0,
 /// This field is updated when a new source location becomes active, so that
@@ -65,10 +69,15 @@ const LazySrcLoc = Module.LazySrcLoc;
 const RangeSet = @import("RangeSet.zig");
 const target_util = @import("target.zig");
 
-pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, *ir.Inst);
+pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index);
 
 pub fn deinit(sema: *Sema) void {
-    sema.inst_map.deinit(sema.gpa);
+    const gpa = sema.gpa;
+    sema.air_instructions.deinit(gpa);
+    sema.air_extra.deinit(gpa);
+    sema.air_values.deinit(gpa);
+    sema.air_variables.deinit(gpa);
+    sema.inst_map.deinit(gpa);
     sema.* = undefined;
 }
 
@@ -108,7 +117,7 @@ pub fn analyzeFnBody(
 /// Returns only the result from the body that is specified.
 /// Only appropriate to call when it is determined at comptime that this body
 /// has no peers.
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!*Inst { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -533,7 +542,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -569,7 +578,7 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } /// TODO when we rework AIR memory layout, this function will no longer have a possible error. -pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!*ir.Inst { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. @@ -618,19 +627,19 @@ pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Z return sema.resolveAirAsType(block, src, air_inst); } -fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: *ir.Inst) !Type { +fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type { const wanted_type = Type.initTag(.@"type"); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toType(sema.arena); } -fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !Value { +fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !Value { return (try sema.resolveDefinedValue(block, src, base)) orelse return sema.failWithNeededComptime(block, src); } -fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !?Value { +fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value { if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); @@ -644,7 +653,7 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - base: *ir.Inst, + base: Air.Inst.Index, ) !?Value { if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| { return opv; @@ -708,13 +717,13 @@ pub fn resolveInstConst( }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -749,7 +758,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) 
InnerError!Air.Inst.Index { const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -820,7 +829,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1017,7 +1026,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1081,7 +1090,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1101,7 +1110,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1141,7 +1150,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1153,7 +1162,7 @@ fn zirRetPtr( return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1166,7 +1175,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1191,7 +1200,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I fn ensureResultUsed( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, src: LazySrcLoc, ) InnerError!void { switch (operand.ty.zigTypeTag()) { @@ -1213,7 +1222,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1247,7 +1256,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1269,13 +1278,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1298,13 +1307,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1317,7 +1326,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!* return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1336,7 +1345,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1589,7 +1598,7 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1625,7 +1634,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, src, param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1653,7 +1662,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1662,7 +1671,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1680,7 +1689,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
}); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1693,7 +1702,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!* }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1722,7 +1731,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1772,7 +1781,7 @@ fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Z return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1832,12 +1841,12 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE // Loop repetition is implied so the last instruction may or may not be a noreturn instruction. try child_block.instructions.append(sema.gpa, &loop_inst.base); - loop_inst.body = .{ .instructions = try sema.arena.dupe(*Inst, loop_block.instructions.items) }; + loop_inst.body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, loop_block.instructions.items) }; return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1847,13 +1856,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1911,7 +1920,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1922,7 +1931,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = 
trace(@src()); defer tracy.end(); @@ -1933,7 +1942,7 @@ fn analyzeBlockBody( if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. - const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items); + const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items); try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); return copied_instructions[copied_instructions.len - 1]; } @@ -1944,7 +1953,7 @@ fn analyzeBlockBody( if (br_block == merges.block_inst) { // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. - const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items[0..last_inst_index]); + const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]); try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); return merges.results.items[0]; } @@ -1959,7 +1968,7 @@ fn analyzeBlockBody( const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items); merges.block_inst.base.ty = resolved_ty; merges.block_inst.body = .{ - .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), }; // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. @@ -1991,7 +2000,7 @@ fn analyzeBlockBody( }, .block = merges.block_inst, .body = .{ - .instructions = try sema.arena.dupe(*Inst, coerce_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, coerce_block.instructions.items), }, }; } @@ -2130,7 +2139,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2138,7 +2147,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2192,7 +2201,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2204,7 +2213,7 @@ fn zirCall( const func = try sema.resolveInst(extra.data.callee); // TODO handle function calls of generic functions - const resolved_args = try sema.arena.alloc(*Inst, args.len); + const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len); for (args) |zir_arg, i| { // the args are already casted to the result of a param type instruction. 
resolved_args[i] = try sema.resolveInst(zir_arg); @@ -2216,13 +2225,13 @@ fn zirCall( fn analyzeCall( sema: *Sema, block: *Scope.Block, - func: *ir.Inst, + func: Air.Inst.Index, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - args: []const *ir.Inst, -) InnerError!*ir.Inst { + args: []const Air.Inst.Index, +) InnerError!Air.Inst.Index { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2279,7 +2288,7 @@ fn analyzeCall( const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or func.ty.fnCallingConvention() == .Inline; - const result: *Inst = if (is_inline_call) res: { + const result: Air.Inst.Index = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, @@ -2377,7 +2386,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2389,7 +2398,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2401,7 +2410,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2409,7 +2418,7 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.constType(sema.arena, src, elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -2424,7 +2433,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2437,7 +2446,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
@@ -2452,7 +2461,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index)
     return sema.mod.constType(sema.arena, .unneeded, array_ty);
 }
 
-fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2465,7 +2474,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner
     return sema.mod.constType(sema.arena, src, anyframe_type);
 }
 
-fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2486,7 +2495,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
     return sema.mod.constType(sema.arena, src, err_union_ty);
 }
 
-fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const tracy = trace(@src());
     defer tracy.end();
@@ -2505,7 +2514,7 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     });
 }
 
-fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2535,7 +2544,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     return block.addUnOp(src, result_ty, .bitcast, op_coerced);
 }
 
-fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2568,7 +2577,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op);
 }
 
-fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2658,7 +2667,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
     });
 }
 
-fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const tracy = trace(@src());
     defer tracy.end();
@@ -2672,7 +2681,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE
     });
 }
 
-fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
@@ -2680,7 +2689,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const operand = try sema.resolveInst(inst_data.operand);
 
-    const enum_tag: *Inst = switch (operand.ty.zigTypeTag()) {
+    const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) {
         .Enum => operand,
         .Union => {
             //if (!operand.ty.unionHasTag()) {
@@ -2754,7 +2763,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
     return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag);
 }
 
-fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const target = mod.getTarget();
     const arena = sema.arena;
@@ -2815,7 +2824,7 @@ fn zirOptionalPayloadPtr(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     safety_check: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2858,7 +2867,7 @@ fn zirOptionalPayload(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     safety_check: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2896,7 +2905,7 @@ fn zirErrUnionPayload(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     safety_check: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2930,7 +2939,7 @@ fn zirErrUnionPayloadPtr(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     safety_check: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2969,7 +2978,7 @@ fn zirErrUnionPayloadPtr(
 }
 
 /// Value in, value out
-fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -2995,7 +3004,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner
 }
 
 /// Pointer in, value out
-fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3042,7 +3051,7 @@ fn zirFunc(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     inferred_error_set: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3093,7 +3102,7 @@ fn funcCommon(
     is_extern: bool,
     src_locs: Zir.Inst.Func.SrcLocs,
     opt_lib_name: ?[]const u8,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = src_node_offset };
     const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
     const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type);
@@ -3234,7 +3243,7 @@ fn funcCommon(
     return result;
 }
 
-fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3242,7 +3251,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Ins
     return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs);
 }
 
-fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
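Note the asymmetry in these signatures: the inst parameter stays Zir.Inst.Index while the return type becomes Air.Inst.Index. ZIR and AIR are separate instruction streams with separate 32-bit index spaces, and sema.resolveInst is the bridge from one to the other. A self-contained sketch of that distinction (invented names and 2021-era builtins; the real Zir.zig/Air.zig definitions differ):

    const std = @import("std");

    // Wrapping each index space in its own non-exhaustive enum makes the
    // compiler reject accidental cross-use of ZIR and AIR indices.
    const ZirIndex = enum(u32) { _ };
    const AirIndex = enum(u32) { _ };

    fn resolveInstSketch(map: []const AirIndex, zir_inst: ZirIndex) AirIndex {
        // Sema keeps a ZIR -> AIR table; resolving is just a lookup.
        return map[@enumToInt(zir_inst)];
    }

    pub fn main() void {
        const map = [_]AirIndex{@intToEnum(AirIndex, 7)};
        const air = resolveInstSketch(&map, @intToEnum(ZirIndex, 0));
        std.debug.print("{}\n", .{@enumToInt(air)});
    }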
@@ -3258,13 +3267,13 @@ fn analyzeAs(
     src: LazySrcLoc,
     zir_dest_type: Zir.Inst.Ref,
     zir_operand: Zir.Inst.Ref,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const dest_type = try sema.resolveType(block, src, zir_dest_type);
     const operand = try sema.resolveInst(zir_operand);
     return sema.coerce(block, dest_type, operand, src);
 }
 
-fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3281,7 +3290,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return block.addUnOp(src, ty, .ptrtoint, ptr);
 }
 
-fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3299,7 +3308,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.analyzeLoad(block, src, result_ptr, result_ptr.src);
 }
 
-fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3312,7 +3321,7 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src);
 }
 
-fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3327,7 +3336,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne
     return sema.analyzeLoad(block, src, result_ptr, src);
 }
 
-fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3340,7 +3349,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne
     return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src);
 }
 
-fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3383,7 +3392,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{});
 }
 
-fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3396,7 +3405,7 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.bitcast(block, dest_type, operand);
 }
 
-fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3439,7 +3448,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
     return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{});
 }
 
-fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3454,7 +3463,7 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.analyzeLoad(block, sema.src, result_ptr, sema.src);
 }
 
-fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3472,7 +3481,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE
     return sema.analyzeLoad(block, src, result_ptr, src);
 }
 
-fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3482,7 +3491,7 @@ fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src);
 }
 
-fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3495,7 +3504,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE
     return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src);
 }
 
-fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3508,7 +3517,7 @@ fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded);
 }
 
-fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3522,7 +3531,7 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded);
 }
 
-fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3544,7 +3553,7 @@ fn zirSwitchCapture(
     inst: Zir.Inst.Index,
     is_multi: bool,
     is_ref: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3563,7 +3572,7 @@ fn zirSwitchCaptureElse(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     is_ref: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3582,7 +3591,7 @@ fn zirSwitchBlock(
     inst: Zir.Inst.Index,
     is_ref: bool,
     special_prong: Zir.SpecialProng,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3615,7 +3624,7 @@ fn zirSwitchBlockMulti(
     inst: Zir.Inst.Index,
     is_ref: bool,
     special_prong: Zir.SpecialProng,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3645,14 +3654,14 @@ fn zirSwitchBlockMulti(
 fn analyzeSwitch(
     sema: *Sema,
     block: *Scope.Block,
-    operand: *Inst,
+    operand: Air.Inst.Index,
     extra_end: usize,
     special_prong: Zir.SpecialProng,
     scalar_cases_len: usize,
     multi_cases_len: usize,
     switch_inst: Zir.Inst.Index,
     src_node_offset: i32,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const gpa = sema.gpa;
     const mod = sema.mod;
@@ -4187,7 +4196,7 @@ fn analyzeSwitch(
             cases[scalar_i] = .{
                 .item = item_val,
-                .body = .{ .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items) },
+                .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) },
             };
         }
@@ -4207,7 +4216,7 @@ fn analyzeSwitch(
             case_block.instructions.shrinkRetainingCapacity(0);
 
-            var any_ok: ?*Inst = null;
+            var any_ok: ?Air.Inst.Index = null;
             const bool_ty = comptime Type.initTag(.bool);
 
             for (items) |item_ref| {
@@ -4280,7 +4289,7 @@ fn analyzeSwitch(
             try case_block.instructions.append(gpa, &new_condbr.base);
 
             const cond_body: Body = .{
-                .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items),
+                .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items),
             };
             case_block.instructions.shrinkRetainingCapacity(0);
@@ -4288,7 +4297,7 @@ fn analyzeSwitch(
             extra_index += body_len;
             _ = try sema.analyzeBody(&case_block, body);
             new_condbr.then_body = .{
-                .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items),
+                .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items),
             };
             if (prev_condbr) |condbr| {
                 condbr.else_body = cond_body;
@@ -4303,7 +4312,7 @@ fn analyzeSwitch(
         case_block.instructions.shrinkRetainingCapacity(0);
         _ = try sema.analyzeBody(&case_block, special.body);
         const else_body: Body = .{
-            .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items),
+            .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items),
         };
         if (prev_condbr) |condbr| {
             condbr.else_body = else_body;
@@ -4507,7 +4516,7 @@ fn validateSwitchNoRange(
     return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
 }
 
-fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     _ = extra;
@@ -4516,7 +4525,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{});
 }
 
-fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -4541,7 +4550,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return mod.constBool(arena, src, false);
 }
 
-fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();
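The dupe calls in analyzeSwitch above are the other half of the representation change: a block body is no longer a slice of pointers but a flat slice of 4-byte indices, snapshotted out of a reusable scratch list into the arena. Roughly, under the 0.8-era allocator conventions this codebase uses (names here are illustrative, not the Sema internals):

    const std = @import("std");

    // Build each case body in a scratch list, then freeze it into the arena
    // as a dense []u32 and reuse the scratch buffer for the next case.
    fn snapshotBody(arena: *std.mem.Allocator, scratch: *std.ArrayList(u32)) ![]u32 {
        const body = try arena.dupe(u32, scratch.items);
        scratch.shrinkRetainingCapacity(0);
        return body;
    }

    pub fn main() !void {
        var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena_state.deinit();
        var scratch = std.ArrayList(u32).init(std.heap.page_allocator);
        defer scratch.deinit();
        try scratch.appendSlice(&[_]u32{ 1, 2, 3 });
        const body = try snapshotBody(&arena_state.allocator, &scratch);
        std.debug.print("{} items snapshotted, scratch now {}\n", .{ body.len, scratch.items.len });
    }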
@@ -4566,13 +4575,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return mod.constType(sema.arena, src, file_root_decl.ty);
 }
 
-fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     _ = inst;
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{});
 }
 
-fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4581,7 +4590,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{});
 }
 
-fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4594,7 +4603,7 @@ fn zirBitwise(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     ir_tag: ir.Inst.Tag,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4606,7 +4615,7 @@ fn zirBitwise(
     const lhs = try sema.resolveInst(extra.lhs);
     const rhs = try sema.resolveInst(extra.rhs);
 
-    const instructions = &[_]*Inst{ lhs, rhs };
+    const instructions = &[_]Air.Inst.Index{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions);
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -4652,7 +4661,7 @@ fn zirBitwise(
     return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs);
 }
 
-fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4660,7 +4669,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{});
 }
 
-fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4668,7 +4677,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{});
 }
 
-fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4681,7 +4690,7 @@ fn zirNegate(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     tag_override: Zir.Inst.Tag,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4695,7 +4704,7 @@ fn zirNegate(
     return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src);
 }
 
-fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4715,7 +4724,7 @@ fn zirOverflowArithmetic(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4729,13 +4738,13 @@ fn analyzeArithmetic(
     sema: *Sema,
     block: *Scope.Block,
     zir_tag: Zir.Inst.Tag,
-    lhs: *Inst,
-    rhs: *Inst,
+    lhs: Air.Inst.Index,
+    rhs: Air.Inst.Index,
     src: LazySrcLoc,
     lhs_src: LazySrcLoc,
     rhs_src: LazySrcLoc,
-) InnerError!*Inst {
-    const instructions = &[_]*Inst{ lhs, rhs };
+) InnerError!Air.Inst.Index {
+    const instructions = &[_]Air.Inst.Index{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions);
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -4844,7 +4853,7 @@ fn analyzeArithmetic(
     return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs);
 }
 
-fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4859,7 +4868,7 @@ fn zirAsm(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -4899,7 +4908,7 @@ fn zirAsm(
         };
     };
 
-    const args = try sema.arena.alloc(*Inst, inputs_len);
+    const args = try sema.arena.alloc(Air.Inst.Index, inputs_len);
     const inputs = try sema.arena.alloc([]const u8, inputs_len);
 
     for (args) |*arg, arg_i| {
@@ -4943,7 +4952,7 @@ fn zirCmp(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     op: std.math.CompareOperator,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
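The same shift applies to short-lived operand lists: a peer-type query over two operands is now an 8-byte stack array, and buffers such as the asm args above become dense u32 slices in the arena. A trivial illustration (hypothetical helper, not a Sema function):

    const std = @import("std");

    // Operand lists are plain slices of 4-byte indices; visiting them is an
    // ordinary slice walk. With *ir.Inst elements the same two-element
    // literal would cost 16 bytes on a 64-bit host.
    fn visitOperands(operands: []const u32) u64 {
        var total: u64 = 0;
        for (operands) |idx| total += idx; // stand-in for per-operand work
        return total;
    }

    pub fn main() void {
        const lhs: u32 = 4;
        const rhs: u32 = 9;
        const operands = &[_]u32{ lhs, rhs }; // mirrors &[_]Air.Inst.Index{ lhs, rhs }
        std.debug.print("{}\n", .{visitOperands(operands)});
    }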
@@ -5009,7 +5018,7 @@ fn zirCmp(
         return mod.constBool(sema.arena, src, lhs.value().?.eql(rhs.value().?) == (op == .eq));
     }
 
-    const instructions = &[_]*Inst{ lhs, rhs };
+    const instructions = &[_]Air.Inst.Index{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions);
     if (!resolved_type.isSelfComparable(is_equality_cmp)) {
         return mod.fail(&block.base, src, "operator not allowed for type '{}'", .{resolved_type});
@@ -5041,7 +5050,7 @@ fn zirCmp(
     return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs);
 }
 
-fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -5051,7 +5060,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size);
 }
 
-fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -5065,7 +5074,7 @@ fn zirThis(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{});
 }
@@ -5074,7 +5083,7 @@ fn zirRetAddr(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{});
 }
@@ -5083,12 +5092,12 @@ fn zirBuiltinSrc(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{});
 }
 
-fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const ty = try sema.resolveType(block, src, inst_data.operand);
@@ -5131,7 +5140,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     }
 }
 
-fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const zir_datas = sema.code.instructions.items(.data);
     const inst_data = zir_datas[inst].un_node;
@@ -5140,7 +5149,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return sema.mod.constType(sema.arena, src, operand.ty);
 }
 
-fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
@@ -5149,13 +5158,13 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     return sema.mod.constType(sema.arena, src, elem_ty);
 }
 
-fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{});
 }
 
-fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{});
@@ -5165,7 +5174,7 @@ fn zirTypeofPeer(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5173,7 +5182,7 @@ fn zirTypeofPeer(
     const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
     const args = sema.code.refSlice(extra.end, extended.small);
 
-    const inst_list = try sema.gpa.alloc(*ir.Inst, args.len);
+    const inst_list = try sema.gpa.alloc(Air.Inst.Index, args.len);
     defer sema.gpa.free(inst_list);
 
     for (args) |arg_ref, i| {
@@ -5184,7 +5193,7 @@ fn zirTypeofPeer(
     return sema.mod.constType(sema.arena, src, result_type);
 }
 
-fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5206,7 +5215,7 @@ fn zirBoolOp(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     comptime is_bool_or: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5237,7 +5246,7 @@ fn zirBoolBr(
     parent_block: *Scope.Block,
     inst: Zir.Inst.Index,
     is_bool_or: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5292,12 +5301,12 @@ fn zirBoolBr(
     const rhs_result = try sema.resolveBody(rhs_block, body);
     _ = try rhs_block.addBr(src, block_inst, rhs_result);
 
-    const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, then_block.instructions.items) };
-    const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, else_block.instructions.items) };
+    const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, then_block.instructions.items) };
+    const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, else_block.instructions.items) };
     _ = try child_block.addCondBr(src, lhs, air_then_body, air_else_body);
 
     block_inst.body = .{
-        .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items),
+        .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items),
     };
     try parent_block.instructions.append(sema.gpa, &block_inst.base);
     return &block_inst.base;
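One thing worth noticing: zirBoolBr above still ends in return &block_inst.base;, and code all over the file still says func.ty, operand.value(), and so on. Those are pointer idioms that cannot apply to a bare integer index, so this commit evidently migrates the declared types first; presumably companion commits on the same branch rework the accesses into queries against the instruction store, in the spirit of this toy model (invented names; the real Air API may differ):

    const std = @import("std");

    // Toy model: once instructions live in one array, "inst.ty" becomes a
    // query on the aggregate instead of a field dereference.
    const Ty = enum { boolean, int32 };
    const Tag = enum { cmp_eq, add };
    const Node = struct { tag: Tag, operand_ty: Ty };

    const Store = struct {
        nodes: []const Node,

        fn typeOf(store: Store, inst: u32) Ty {
            return switch (store.nodes[inst].tag) {
                .cmp_eq => .boolean, // comparisons always yield a boolean
                .add => store.nodes[inst].operand_ty,
            };
        }
    };

    pub fn main() void {
        const store = Store{ .nodes = &[_]Node{.{ .tag = .cmp_eq, .operand_ty = .int32 }} };
        std.debug.print("{s}\n", .{@tagName(store.typeOf(0))});
    }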
@@ -5307,7 +5316,7 @@ fn zirIsNonNull(
     sema: *Sema,
     block: *Scope.Block,
     inst: Zir.Inst.Index,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5321,7 +5330,7 @@ fn zirIsNonNullPtr(
     sema: *Sema,
     block: *Scope.Block,
     inst: Zir.Inst.Index,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5332,7 +5341,7 @@ fn zirIsNonNullPtr(
     return sema.analyzeIsNull(block, src, loaded, true);
 }
 
-fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5341,7 +5350,7 @@ fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.analyzeIsNonErr(block, inst_data.src(), operand);
 }
 
-fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5385,14 +5394,14 @@ fn zirCondbr(
     _ = try sema.analyzeBody(&sub_block, then_body);
 
     const air_then_body: ir.Body = .{
-        .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items),
+        .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items),
     };
 
     sub_block.instructions.shrinkRetainingCapacity(0);
     _ = try sema.analyzeBody(&sub_block, else_body);
 
     const air_else_body: ir.Body = .{
-        .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items),
+        .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items),
     };
 
     _ = try parent_block.addCondBr(src, cond, air_then_body, air_else_body);
@@ -5470,7 +5479,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
 fn analyzeRet(
     sema: *Sema,
     block: *Scope.Block,
-    operand: *Inst,
+    operand: Air.Inst.Index,
     src: LazySrcLoc,
     need_coercion: bool,
 ) InnerError!Zir.Inst.Index {
@@ -5505,7 +5514,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
     };
 }
 
-fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5526,7 +5535,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne
     return sema.mod.constType(sema.arena, .unneeded, ty);
 }
 
-fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5580,7 +5589,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.mod.constType(sema.arena, src, ty);
 }
 
-fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -5594,13 +5603,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In
     });
 }
 
-fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{});
 }
 
-fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const gpa = sema.gpa;
     const zir_datas = sema.code.instructions.items(.data);
@@ -5622,7 +5631,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
     mem.set(Zir.Inst.Index, found_fields, 0);
 
     // The init values to use for the struct instance.
-    const field_inits = try gpa.alloc(*ir.Inst, struct_obj.fields.count());
+    const field_inits = try gpa.alloc(Air.Inst.Index, struct_obj.fields.count());
     defer gpa.free(field_inits);
 
     var field_i: u32 = 0;
@@ -5713,7 +5722,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
     return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{});
 }
 
-fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5721,7 +5730,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{});
 }
 
-fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5729,7 +5738,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{});
 }
 
-fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5737,13 +5746,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{});
 }
 
-fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{});
 }
 
-fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -5765,7 +5774,7 @@ fn zirErrorReturnTrace(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{});
 }
@@ -5774,7 +5783,7 @@ fn zirFrame(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{});
 }
@@ -5783,84 +5792,84 @@ fn zirFrameAddress(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{});
 }
 
-fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{});
 }
 
-fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{});
 }
 
-fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{});
 }
 
-fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{});
 }
 
-fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{});
 }
 
-fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{});
 }
 
-fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{});
 }
 
-fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{});
 }
 
-fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{});
 }
 
-fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{});
 }
 
-fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{});
 }
 
-fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{});
 }
 
-fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
@@ -5923,199 +5932,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return block.addUnOp(src, type_res, .bitcast, operand_coerced);
 }
 
-fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{});
 }
 
-fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{});
 }
 
-fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{});
 }
 
-fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{});
 }
 
-fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{});
 }
 
-fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{});
 }
 
-fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{});
 }
 
-fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{});
 }
 
-fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{});
 }
 
-fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{});
 }
 
-fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{});
 }
 
-fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{});
 }
 
-fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{});
 }
 
-fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{});
 }
-fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{});
 }
 
-fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{});
 }
 
-fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{});
 }
 
-fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{});
 }
 
-fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{});
 }
 
-fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{});
 }
 
-fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{});
 }
 
-fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{});
 }
 
-fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{});
 }
 
-fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{});
 }
 
-fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{});
 }
 
-fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{});
 }
 
-fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{});
 }
 
-fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{});
 }
 
-fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{});
 }
 
-fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{});
 }
 
-fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{});
 }
 
-fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{});
 }
 
-fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{});
@@ -6126,7 +6135,7 @@ fn zirAwait(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     is_nosuspend: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
@@ -6138,7 +6147,7 @@ fn zirVarExtended(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
     const src = sema.src;
     const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type
@@ -6204,7 +6213,7 @@ fn zirFuncExtended(
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
     inst: Zir.Inst.Index,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
@@ -6271,7 +6280,7 @@ fn zirCUndef(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{});
@@ -6281,7 +6290,7 @@ fn zirCInclude(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{});
@@ -6291,7 +6300,7 @@ fn zirCDefine(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{});
@@ -6301,7 +6310,7 @@ fn zirWasmMemorySize(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{});
@@ -6311,7 +6320,7 @@ fn zirWasmMemoryGrow(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{});
@@ -6321,7 +6330,7 @@ fn zirBuiltinExtern(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{});
@@ -6355,7 +6364,7 @@ pub const PanicId = enum {
     invalid_error_code,
 };
 
-fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void {
+fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void {
     const block_inst = try sema.arena.create(Inst.Block);
     block_inst.* = .{
         .base = .{
@@ -6364,12 +6373,12 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id:
             .src = ok.src,
         },
         .body = .{
-            .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the condbr.
+            .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the condbr.
         },
     };
 
     const ok_body: ir.Body = .{
-        .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the br_void.
+        .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the br_void.
     };
     const br_void = try sema.arena.create(Inst.BrVoid);
     br_void.* = .{
@@ -6395,7 +6404,7 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id:
     _ = try sema.safetyPanic(&fail_block, ok.src, panic_id);
 
-    const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, fail_block.instructions.items) };
+    const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, fail_block.instructions.items) };
 
     const condbr = try sema.arena.create(Inst.CondBr);
     condbr.* = .{
@@ -6417,7 +6426,7 @@ fn panicWithMsg(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    msg_inst: *ir.Inst,
+    msg_inst: Air.Inst.Index,
 ) !Zir.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
@@ -6438,7 +6447,7 @@ fn panicWithMsg(
         .ty = try mod.optionalType(arena, ptr_stack_trace_ty),
         .val = Value.initTag(.null_value),
     });
-    const args = try arena.create([2]*ir.Inst);
+    const args = try arena.create([2]Air.Inst.Index);
     args.* = .{ msg_inst, null_stack_trace };
     _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args);
     return always_noreturn;
@@ -6494,10 +6503,10 @@ fn namedFieldPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    object_ptr: *Inst,
+    object_ptr: Air.Inst.Index,
     field_name: []const u8,
     field_name_src: LazySrcLoc,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
@@ -6647,7 +6656,7 @@ fn analyzeNamespaceLookup(
     src: LazySrcLoc,
     namespace: *Scope.Namespace,
     decl_name: []const u8,
-) InnerError!?*Inst {
+) InnerError!?Air.Inst.Index {
     const mod = sema.mod;
     const gpa = sema.gpa;
     if (try sema.lookupInNamespace(namespace, decl_name)) |decl| {
@@ -6671,11 +6680,11 @@ fn analyzeStructFieldPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    struct_ptr: *Inst,
+    struct_ptr: Air.Inst.Index,
     field_name: []const u8,
     field_name_src: LazySrcLoc,
     unresolved_struct_ty: Type,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
     assert(unresolved_struct_ty.zigTypeTag() == .Struct);
@@ -6706,11 +6715,11 @@ fn analyzeUnionFieldPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    union_ptr: *Inst,
+    union_ptr: Air.Inst.Index,
     field_name: []const u8,
     field_name_src: LazySrcLoc,
     unresolved_union_ty: Type,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
     assert(unresolved_union_ty.zigTypeTag() == .Union);
@@ -6743,10 +6752,10 @@ fn elemPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    array_ptr: *Inst,
-    elem_index: *Inst,
+    array_ptr: Air.Inst.Index,
+    elem_index: Air.Inst.Index,
     elem_index_src: LazySrcLoc,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const array_ty = switch (array_ptr.ty.zigTypeTag()) {
         .Pointer => array_ptr.ty.elemType(),
         else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}),
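A small cost of the new representation shows up in optional results such as analyzeNamespaceLookup's InnerError!?Air.Inst.Index above (and any_ok in analyzeSwitch earlier): an optional pointer is free, because null can be encoded as address zero, while an optional integer index needs a separate flag byte plus padding. A quick self-contained check:

    const std = @import("std");
    const assert = std.debug.assert;

    pub fn main() void {
        // ?*T can use address zero as null, so it stays pointer-sized,
        // while an optional u32 index must carry an explicit flag.
        assert(@sizeOf(?*u8) == @sizeOf(*u8));
        assert(@sizeOf(?u32) > @sizeOf(u32));
        std.debug.print("?*u8: {} bytes, ?u32: {} bytes\n", .{ @sizeOf(?*u8), @sizeOf(?u32) });
    }

In exchange, the non-optional case is half the size, which dominates here since bodies and operand lists vastly outnumber optional slots.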
(elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. @@ -6798,9 +6807,9 @@ fn coerce( sema: *Sema, block: *Scope.Block, dest_type: Type, - inst: *Inst, + inst: Air.Inst.Index, inst_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { if (dest_type.tag() == .var_args_param) { return sema.coerceVarArgParam(block, inst); } @@ -6976,7 +6985,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) InnerError!?*Inst { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7014,7 +7023,7 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) Inn return null; } -fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: *Inst) !*Inst { +fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index { switch (inst.ty.zigTypeTag()) { .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}), else => {}, @@ -7027,8 +7036,8 @@ fn storePtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: *Inst, - uncasted_value: *Inst, + ptr: Air.Inst.Index, + uncasted_value: Air.Inst.Index, ) !void { if (ptr.ty.isConstPtr()) return sema.mod.fail(&block.base, src, "cannot assign to constant", .{}); @@ -7076,7 +7085,7 @@ fn storePtr( _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); } -fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // Keep the comptime Value representation; take the new type. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7086,7 +7095,7 @@ fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Ins return block.addUnOp(inst.src, dest_type, .bitcast, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7094,7 +7103,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7102,12 +7111,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7128,7 +7137,7 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl }); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!*Inst { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index { const variable = tv.val.castTag(.variable).?.data; const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); @@ -7157,8 +7166,8 @@ fn analyzeRef( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, -) InnerError!*Inst { + operand: Air.Inst.Index, +) InnerError!Air.Inst.Index { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7176,9 +7185,9 @@ fn analyzeLoad( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: *Inst, + ptr: Air.Inst.Index, ptr_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const elem_ty = switch (ptr.ty.zigTypeTag()) { .Pointer => ptr.ty.elemType(), else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}), @@ -7201,9 +7210,9 @@ fn analyzeIsNull( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, + operand: Air.Inst.Index, invert_logic: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7222,8 +7231,8 @@ fn analyzeIsNonErr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, -) InnerError!*Inst { + operand: Air.Inst.Index, +) InnerError!Air.Inst.Index { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); @@ -7243,12 +7252,12 @@ fn analyzeSlice( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - start: *Inst, - end_opt: ?*Inst, - sentinel_opt: ?*Inst, + array_ptr: Air.Inst.Index, + start: Air.Inst.Index, + end_opt: ?Air.Inst.Index, + sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7319,10 +7328,10 @@ fn cmpNumeric( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - lhs: *Inst, - rhs: 
*Inst, + lhs: Air.Inst.Index, + rhs: Air.Inst.Index, op: std.math.CompareOperator, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { assert(lhs.ty.isNumeric()); assert(rhs.ty.isNumeric()); @@ -7488,7 +7497,7 @@ fn cmpNumeric( return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } -fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); } @@ -7497,7 +7506,7 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); } -fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { const err_union = dest_type.castTag(.error_union).?; if (inst.value()) |val| { if (inst.ty.zigTypeTag() != .ErrorSet) { @@ -7568,7 +7577,7 @@ fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst } } -fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []*Inst) !Type { +fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []Air.Inst.Index) !Type { if (instructions.len == 0) return Type.initTag(.noreturn); @@ -7704,7 +7713,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!*ir.Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 60e9a96275..4a9087d7f5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -18,14 +18,14 @@ pub const Word = u32; pub const ResultId = u32; pub const TypeMap = std.HashMap(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage); -pub const InstMap = std.AutoHashMap(*Inst, ResultId); +pub const InstMap = std.AutoHashMap(Air.Inst.Index, ResultId); const IncomingBlock = struct { src_label_id: ResultId, break_value_id: ResultId, }; -pub const BlockMap = std.AutoHashMap(*Inst.Block, struct { +pub const BlockMap = std.AutoHashMap(Air.Inst.Index, struct { label_id: ResultId, incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock), }); @@ -279,16 +279,17 @@ pub const DeclGen = struct { return self.spv.module.getTarget(); } - fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error { + fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(self.decl); self.error_msg = try Module.ErrorMsg.create(self.spv.module.gpa, src_loc, format, args); return error.AnalysisFail; } - fn resolve(self: *DeclGen, inst: *Inst) !ResultId { + fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId { if (inst.value()) |val| { - return self.genConstant(inst.src, inst.ty, val); + return self.genConstant(inst.ty, val); } return self.inst_results.get(inst).?; // Instruction does not dominate all uses! @@ -313,7 +314,7 @@ pub const DeclGen = struct { const target = self.getTarget(); // The backend will never be asked to compile a 0-bit integer, so we won't have to handle those in this function.
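+            // Illustrative examples (assuming the smallest wide-enough native width
+            // is chosen): with Int16 enabled a u12 would be backed by 16 bits, while
+            // an i128 exceeds every native width, so this function reports null and
+            // callers fall back to the "composite integer" representation.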
- std.debug.assert(bits != 0); + assert(bits != 0); // 8, 16 and 64-bit integers require the Int8, Int16 and Int64 capabilities respectively. // 32-bit integers are always supported (see spec, 2.16.1, Data rules). @@ -387,19 +388,19 @@ pub const DeclGen = struct { .composite_integer }; }, // As of yet, there is no vector support in the self-hosted compiler. - .Vector => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}), + .Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}), // TODO: For which types is this the case? - else => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}), + else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}), }; } /// Generate a constant representing `val`. /// TODO: Deduplication? - fn genConstant(self: *DeclGen, src: LazySrcLoc, ty: Type, val: Value) Error!ResultId { + fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!ResultId { const target = self.getTarget(); const code = &self.spv.binary.types_globals_constants; const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(src, ty); + const result_type_id = try self.genType(ty); if (val.isUndef()) { try writeInstruction(code, .OpUndef, &[_]Word{ result_type_id, result_id }); @@ -411,13 +412,13 @@ pub const DeclGen = struct { const int_info = ty.intInfo(target); const backing_bits = self.backingIntBits(int_info.bits) orelse { // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. - return self.fail(src, "TODO: SPIR-V backend: implement composite int constants for {}", .{ty}); + return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty}); }; // We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any // SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this // might need to be updated. - std.debug.assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64)); + assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64)); var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt(); // Mask the low bits which make up the actual integer. This is to make sure that negative values @@ -469,13 +470,13 @@ pub const DeclGen = struct { } }, .Void => unreachable, - else => return self.fail(src, "TODO: SPIR-V backend: constant generation of type {}", .{ty}), + else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}), } return result_id; } - fn genType(self: *DeclGen, src: LazySrcLoc, ty: Type) Error!ResultId { + fn genType(self: *DeclGen, ty: Type) Error!ResultId { // We can't use getOrPut here so we can recursively generate types. if (self.spv.types.get(ty)) |already_generated| { return already_generated; @@ -492,7 +493,7 @@ pub const DeclGen = struct { const int_info = ty.intInfo(target); const backing_bits = self.backingIntBits(int_info.bits) orelse { // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. - return self.fail(src, "TODO: SPIR-V backend: implement composite int {}", .{ty}); + return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty}); }; // TODO: If backing_bits != int_info.bits, a duplicate type might be generated here.
@@ -518,7 +519,7 @@ pub const DeclGen = struct { }; if (!supported) { - return self.fail(src, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits}); + return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits}); } try writeInstruction(code, .OpTypeFloat, &[_]Word{ result_id, bits }); @@ -526,19 +527,19 @@ pub const DeclGen = struct { .Fn => { // We only support zig-calling-convention functions, no varargs. if (ty.fnCallingConvention() != .Unspecified) - return self.fail(src, "Unsupported calling convention for SPIR-V", .{}); + return self.fail("Unsupported calling convention for SPIR-V", .{}); if (ty.fnIsVarArgs()) - return self.fail(src, "VarArgs unsupported for SPIR-V", .{}); + return self.fail("VarArgs unsupported for SPIR-V", .{}); // In order to avoid a temporary here, first generate all the required types and then simply look them up // when generating the function type. const params = ty.fnParamLen(); var i: usize = 0; while (i < params) : (i += 1) { - _ = try self.genType(src, ty.fnParamType(i)); + _ = try self.genType(ty.fnParamType(i)); } - const return_type_id = try self.genType(src, ty.fnReturnType()); + const return_type_id = try self.genType(ty.fnReturnType()); // result id + result type id + parameter type ids. try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen())); @@ -551,7 +552,7 @@ pub const DeclGen = struct { } }, // When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType. - .Pointer => return self.fail(src, "Cannot create pointer with unkown storage class", .{}), + .Pointer => return self.fail("Cannot create pointer with unknown storage class", .{}), .Vector => { // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations // which work on them), so simply use those. @@ -561,7 +562,7 @@ pub const DeclGen = struct { // is adequate at all for this. // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems. - return self.fail(src, "TODO: SPIR-V backend: implement type Vector", .{}); + return self.fail("TODO: SPIR-V backend: implement type Vector", .{}); }, .Null, .Undefined, @@ -573,7 +574,7 @@ pub const DeclGen = struct { .BoundFn => unreachable, // this type will be deleted from the language. - else => |tag| return self.fail(src, "TODO: SPIR-V backend: implement type {}s", .{tag}), + else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}), } try self.spv.types.putNoClobber(ty, result_id); @@ -582,8 +583,8 @@ pub const DeclGen = struct { /// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that. /// TODO: The result of this needs to be cached. - fn genPointerType(self: *DeclGen, src: LazySrcLoc, ty: Type, storage_class: spec.StorageClass) !ResultId { - std.debug.assert(ty.zigTypeTag() == .Pointer); + fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !ResultId { + assert(ty.zigTypeTag() == .Pointer); const code = &self.spv.binary.types_globals_constants; const result_id = self.spv.allocResultId(); @@ -591,7 +592,7 @@ pub const DeclGen = struct { // TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types // if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled.
// These also relate to the pointer's address space. - const child_id = try self.genType(src, ty.elemType()); + const child_id = try self.genType(ty.elemType()); try writeInstruction(code, .OpTypePointer, &[_]Word{ result_id, @enumToInt(storage_class), child_id }); @@ -602,9 +603,9 @@ pub const DeclGen = struct { const decl = self.decl; const result_id = decl.fn_link.spirv.id; - if (decl.val.castTag(.function)) |func_payload| { - std.debug.assert(decl.ty.zigTypeTag() == .Fn); - const prototype_id = try self.genType(.{ .node_offset = 0 }, decl.ty); + if (decl.val.castTag(.function)) |_| { + assert(decl.ty.zigTypeTag() == .Fn); + const prototype_id = try self.genType(decl.ty); try writeInstruction(&self.spv.binary.fn_decls, .OpFunction, &[_]Word{ self.spv.types.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype. result_id, @@ -631,189 +632,167 @@ pub const DeclGen = struct { try writeInstruction(&self.spv.binary.fn_decls, .OpLabel, &[_]Word{root_block_id}); self.current_block_label_id = root_block_id; - try self.genBody(func_payload.data.body); + const main_body = self.air.getMainBody(); + try self.genBody(main_body); // Append the actual code into the fn_decls section. try self.spv.binary.fn_decls.appendSlice(self.code.items); try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionEnd, &[_]Word{}); } else { - return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()}); + return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()}); } } - fn genBody(self: *DeclGen, body: ir.Body) Error!void { - for (body.instructions) |inst| { + fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void { + for (body) |inst| { try self.genInst(inst); } } - fn genInst(self: *DeclGen, inst: *Inst) !void { - const result_id = switch (inst.tag) { - .add, .addwrap => try self.genBinOp(inst.castTag(.add).?), - .sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?), - .mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?), - .div => try self.genBinOp(inst.castTag(.div).?), - .bit_and => try self.genBinOp(inst.castTag(.bit_and).?), - .bit_or => try self.genBinOp(inst.castTag(.bit_or).?), - .xor => try self.genBinOp(inst.castTag(.xor).?), - .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?), - .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?), - .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?), - .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?), - .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?), - .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?), - .bool_and => try self.genBinOp(inst.castTag(.bool_and).?), - .bool_or => try self.genBinOp(inst.castTag(.bool_or).?), - .not => try self.genUnOp(inst.castTag(.not).?), - .alloc => try self.genAlloc(inst.castTag(.alloc).?), - .arg => self.genArg(), - .block => (try self.genBlock(inst.castTag(.block).?)) orelse return, - .br => return try self.genBr(inst.castTag(.br).?), - .br_void => return try self.genBrVoid(inst.castTag(.br_void).?), - // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them - // throughout the IR.
+ fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { + const air_tags = self.air.instructions.items(.tag); + const result_id = switch (air_tags[inst]) { + // zig fmt: off + .add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), + .sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), + .mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), + .div => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), + + .bit_and => try self.genBinOpSimple(inst, .OpBitwiseAnd), + .bit_or => try self.genBinOpSimple(inst, .OpBitwiseOr), + .xor => try self.genBinOpSimple(inst, .OpBitwiseXor), + .bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd), + .bool_or => try self.genBinOpSimple(inst, .OpLogicalOr), + + .not => try self.genNot(inst), + + .cmp_eq => try self.genCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), + .cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), + .cmp_gt => try self.genCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), + .cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), + .cmp_lt => try self.genCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), + .cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), + + .arg => self.genArg(), + .alloc => try self.genAlloc(inst), + .block => (try self.genBlock(inst)) orelse return, + .load => try self.genLoad(inst), + + .br => return self.genBr(inst), .breakpoint => return, - .condbr => return try self.genCondBr(inst.castTag(.condbr).?), - .constant => unreachable, - .dbg_stmt => return try self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .load => try self.genLoad(inst.castTag(.load).?), - .loop => return try self.genLoop(inst.castTag(.loop).?), - .ret => return try self.genRet(inst.castTag(.ret).?), - .retvoid => return try self.genRetVoid(), - .store => return try self.genStore(inst.castTag(.store).?), - .unreach => return try self.genUnreach(), - else => return self.fail(inst.src, "TODO: SPIR-V backend: implement inst {s}", .{@tagName(inst.tag)}), + .condbr => return self.genCondBr(inst), + .constant => unreachable, + .dbg_stmt => return self.genDbgStmt(inst), + .loop => return self.genLoop(inst), + .ret => return self.genRet(inst), + .store => return self.genStore(inst), + .unreach => return self.genUnreach(), + // zig fmt: on }; try self.inst_results.putNoClobber(inst, result_id); } - fn genBinOp(self: *DeclGen, inst: *Inst.BinOp) !ResultId { - // TODO: Will lhs and rhs have the same type? - const lhs_id = try self.resolve(inst.lhs); - const rhs_id = try self.resolve(inst.rhs); + fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); + const result_id = self.spv.allocResultId(); + const result_type_id = try self.genType(self.air.getType(inst)); + try writeInstruction(&self.code, opcode, &[_]Word{ + result_type_id, result_id, lhs_id, rhs_id, + }); + return result_id; + } + + fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + // LHS and RHS are guaranteed to have the same type, and AIR guarantees + // the result type to be the same as that of the LHS and RHS, which matches SPIR-V.
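+            // For instance, `.add` and `.addwrap` pass .{.OpFAdd, .OpIAdd, .OpIAdd}:
+            // index 0 is the float opcode and indices 1 and 2 are the signed and
+            // unsigned integer opcodes (both OpIAdd, since two's complement addition
+            // does not depend on signedness), while `.div` distinguishes OpSDiv from OpUDiv.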
+ const ty = self.air.getType(inst); + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - // TODO: Is the result the same as the argument types? - // This is supposed to be the case for SPIR-V. - std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty)); - std.debug.assert(inst.base.ty.tag() == .bool or inst.base.ty.eql(inst.lhs.ty)); - - // Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float - // versions of operations require different opcodes. - // For operations which produce bools, the information of inst.base.ty is not useful, so just pick either operand - // instead. - const info = try self.arithmeticTypeInfo(inst.lhs.ty); - - if (info.class == .composite_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{}); - } else if (info.class == .strange_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for strange integers", .{}); - } + const result_type_id = try self.genType(ty); + + assert(self.air.getType(bin_op.lhs).eql(ty)); + assert(self.air.getType(bin_op.rhs).eql(ty)); - const is_float = info.class == .float; - const is_signed = info.signedness == .signed; - // **Note**: All these operations must be valid for vectors as well! - const opcode = switch (inst.base.tag) { - // The regular integer operations are all defined for wrapping. Since theyre only relevant for integers, - // we can just switch on both cases here. - .add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd, - .sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub, - .mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul, - // TODO: Trap if divisor is 0? - // TODO: Figure out of OpSDiv for unsigned/OpUDiv for signed does anything useful. - // => Those are probably for divTrunc and divFloor, though the compiler does not yet generate those. - // => TODO: Figure out how those work on the SPIR-V side. - // => TODO: Test these. - .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv, - // Only integer versions for these. - .bit_and => Opcode.OpBitwiseAnd, - .bit_or => Opcode.OpBitwiseOr, - .xor => Opcode.OpBitwiseXor, - // Bool -> bool operations. - .bool_and => Opcode.OpLogicalAnd, - .bool_or => Opcode.OpLogicalOr, + // Binary operations are generally applicable to both scalar and vector operations + // in SPIR-V, but int and float versions of operations require different opcodes. + const info = try self.arithmeticTypeInfo(ty); + + const opcode_index: usize = switch (info.class) { + .composite_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{}); + }, + .strange_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{}); + }, + .integer => switch (info.signedness) { + .signed => 1, + .unsigned => 2, + }, + .float => 0, else => unreachable, }; - + const opcode = ops[opcode_index]; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id }); // TODO: Trap on overflow? Probably going to be annoying. // TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap. 
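+            // (Here a "strange" integer is one whose bit width does not exactly
+            // match its backing native width, e.g. a u3 backed by 8 or 32 bits;
+            // results on such types would additionally need to be masked back into
+            // range, hence the TODO bail-out above.)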
- if (info.class != .strange_integer) - return result_id; - - return self.fail(inst.base.src, "TODO: SPIR-V backend: strange integer operation mask", .{}); + return result_id; } - fn genCmp(self: *DeclGen, inst: *Inst.BinOp) !ResultId { - const lhs_id = try self.resolve(inst.lhs); - const rhs_id = try self.resolve(inst.rhs); - + fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - // All of these operations should be 2 equal types -> bool - std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty)); - std.debug.assert(inst.base.ty.tag() == .bool); - - // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, but int and float - // versions of operations require different opcodes. - // Since inst.base.ty is always bool and so not very useful, and because both arguments must be the same, just get the info - // from either of the operands. - const info = try self.arithmeticTypeInfo(inst.lhs.ty); - - if (info.class == .composite_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{}); - } else if (info.class == .strange_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: comparison for strange integers", .{}); - } + const result_type_id = try self.genType(Type.initTag(.bool)); + const op_ty = self.air.getType(bin_op.lhs); + assert(op_ty.eql(self.air.getType(bin_op.rhs))); - const is_bool = info.class == .bool; - const is_float = info.class == .float; - const is_signed = info.signedness == .signed; - - // **Note**: All these operations must be valid for vectors as well! - // For floating points, we generally want ordered operations (which return false if either operand is nan). - const opcode = switch (inst.base.tag) { - .cmp_eq => if (is_float) Opcode.OpFOrdEqual else if (is_bool) Opcode.OpLogicalEqual else Opcode.OpIEqual, - .cmp_neq => if (is_float) Opcode.OpFOrdNotEqual else if (is_bool) Opcode.OpLogicalNotEqual else Opcode.OpINotEqual, - // TODO: Verify that these OpFOrd type operations produce the right value. - // TODO: Is there a more fundamental difference between OpU and OpS operations here than just the type? - .cmp_gt => if (is_float) Opcode.OpFOrdGreaterThan else if (is_signed) Opcode.OpSGreaterThan else Opcode.OpUGreaterThan, - .cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual, - .cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan, - .cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual, + // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, + // but int and float versions of operations require different opcodes. 
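+            // For floating point operands the tables pick the "ordered" opcodes,
+            // which evaluate to false when either operand is NaN; e.g. `.cmp_lt`
+            // maps to .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}.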
+ const info = try self.arithmeticTypeInfo(op_ty); + + const opcode_index: usize = switch (info.class) { + .composite_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{}); + }, + .strange_integer => { + return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{}); + }, + .float => 0, + .bool => 1, + .integer => switch (info.signedness) { + .signed => 1, + .unsigned => 2, + }, else => unreachable, }; + const opcode = ops[opcode_index]; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id }); return result_id; } - fn genUnOp(self: *DeclGen, inst: *Inst.UnOp) !ResultId { - const operand_id = try self.resolve(inst.operand); - + fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_id = try self.resolve(ty_op.operand); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - const opcode = switch (inst.base.tag) { - // Bool -> bool - .not => Opcode.OpLogicalNot, - else => unreachable, - }; - + const result_type_id = try self.genType(Type.initTag(.bool)); + const opcode: Opcode = .OpLogicalNot; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, operand_id }); - return result_id; } - fn genAlloc(self: *DeclGen, inst: *Inst.NoOp) !ResultId { + fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty = self.air.getType(inst); const storage_class = spec.StorageClass.Function; - const result_type_id = try self.genPointerType(inst.base.src, inst.base.ty, storage_class); + const result_type_id = try self.genPointerType(ty, storage_class); const result_id = self.spv.allocResultId(); // Rather than generating into code here, we're just going to generate directly into the fn_decls section so that @@ -828,7 +807,7 @@ pub const DeclGen = struct { return self.args.items[self.next_arg_index]; } - fn genBlock(self: *DeclGen, inst: *Inst.Block) !?ResultId { + fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up // the current block by first generating the code of the block, then a label, and then generate the rest of the current @@ -848,11 +827,16 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.spv.gpa); } - try self.genBody(inst.body); + const ty = self.air.getType(inst); + const inst_datas = self.air.instructions.items(.data); + const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; + + try self.genBody(body); try self.beginSPIRVBlock(label_id); // If this block didn't produce a value, simply return here. - if (!inst.base.ty.hasCodeGenBits()) + if (!ty.hasCodeGenBits()) return null; // Combine the result from the blocks using the Phi instruction. @@ -862,7 +846,7 @@ pub const DeclGen = struct { // TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types // are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws // an error for pointers. 
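+            // OpPhi operand layout: result type id, result id, then one
+            // (value id, predecessor label id) pair per incoming block; hence the
+            // word count of 2 + incoming_blocks.items.len * 2 below.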
- const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(ty); _ = result_type_id; try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent... @@ -874,30 +858,26 @@ pub const DeclGen = struct { return result_id; } - fn genBr(self: *DeclGen, inst: *Inst.Br) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const target = self.blocks.get(inst.block).?; + fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void { + const br = self.air.instructions.items(.data)[inst].br; + const block = self.blocks.get(br.block_inst).?; + const operand_ty = self.air.getType(br.operand); - // TODO: For some reason, br is emitted with void parameters. - if (inst.operand.ty.hasCodeGenBits()) { - const operand_id = try self.resolve(inst.operand); + if (operand_ty.hasCodeGenBits()) { + const operand_id = try self.resolve(br.operand); // current_block_label_id should not be undefined here, unless there is a br or br_void in the function's body. - try target.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); + try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); } - try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id}); - } - - fn genBrVoid(self: *DeclGen, inst: *Inst.BrVoid) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const target = self.blocks.get(inst.block).?; - // Don't need to add this to the incoming block list, as there is no value to insert in the phi node anyway. - try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id}); + try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id}); } - fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const condition_id = try self.resolve(inst.condition); + fn genCondBr(self: *DeclGen, inst: Air.Inst.Index) !void { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const cond_br = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]; + const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len]; + const condition_id = try self.resolve(pl_op.operand); // These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block.
const then_label_id = self.spv.allocResultId(); @@ -913,23 +893,26 @@ pub const DeclGen = struct { }); try self.beginSPIRVBlock(then_label_id); - try self.genBody(inst.then_body); + try self.genBody(then_body); try self.beginSPIRVBlock(else_label_id); - try self.genBody(inst.else_body); + try self.genBody(else_body); } - fn genDbgStmt(self: *DeclGen, inst: *Inst.DbgStmt) !void { + fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { + const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const src_fname_id = try self.spv.resolveSourceFileName(self.decl); - try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, inst.line, inst.column }); + try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column }); } - fn genLoad(self: *DeclGen, inst: *Inst.UnOp) !ResultId { - const operand_id = try self.resolve(inst.operand); + fn genLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_id = try self.resolve(ty_op.operand); + const ty = self.air.getType(inst); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(ty); const result_id = self.spv.allocResultId(); - const operands = if (inst.base.ty.isVolatilePtr()) + const operands = if (ty.isVolatilePtr()) &[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } else &[_]Word{ result_type_id, result_id, operand_id }; @@ -939,8 +922,9 @@ pub const DeclGen = struct { return result_id; } - fn genLoop(self: *DeclGen, inst: *Inst.Loop) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? + fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void { + const loop = self.air.extraData(Air.Block, self.air.instructions.items(.data)[inst].ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_label_id = self.spv.allocResultId(); // Jump to the loop entry point @@ -949,27 +933,29 @@ pub const DeclGen = struct { // TODO: Look into OpLoopMerge. try self.beginSPIRVBlock(loop_label_id); - try self.genBody(inst.body); + try self.genBody(body); try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id}); } - fn genRet(self: *DeclGen, inst: *Inst.UnOp) !void { - const operand_id = try self.resolve(inst.operand); - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); - } - - fn genRetVoid(self: *DeclGen) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed?
- try writeInstruction(&self.code, .OpReturn, &[_]Word{}); + fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void { + const operand = self.air.instructions.items(.data)[inst].un_op; + const operand_ty = self.air.getType(operand); + if (operand_ty.hasCodeGenBits()) { + const operand_id = try self.resolve(operand); + try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); + } else { + try writeInstruction(&self.code, .OpReturn, &[_]Word{}); + } } - fn genStore(self: *DeclGen, inst: *Inst.BinOp) !void { - const dst_ptr_id = try self.resolve(inst.lhs); - const src_val_id = try self.resolve(inst.rhs); + fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const dst_ptr_id = try self.resolve(bin_op.lhs); + const src_val_id = try self.resolve(bin_op.rhs); + const lhs_ty = self.air.getType(bin_op.lhs); - const operands = if (inst.lhs.ty.isVolatilePtr()) + const operands = if (lhs_ty.isVolatilePtr()) &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } else &[_]Word{ dst_ptr_id, src_val_id }; @@ -978,7 +964,6 @@ pub const DeclGen = struct { } fn genUnreach(self: *DeclGen) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? try writeInstruction(&self.code, .OpUnreachable, &[_]Word{}); } }; -- cgit v1.2.3 From ee6432537ee29485c5de6c8b0911ef1482d752a7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Jul 2021 23:38:29 -0700 Subject: stage2: first pass over codegen.zig for AIR memory layout --- BRANCH_TODO | 38 -- src/Liveness.zig | 21 + src/codegen.zig | 1365 +++++++++++++++++++++++++--------------------- src/register_manager.zig | 11 +- 4 files changed, 769 insertions(+), 666 deletions(-) (limited to 'src/Liveness.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index 3b946edbbd..be3959e035 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,48 +1,10 @@ * be sure to test debug info of parameters - pub fn isUnused(self: Inst) bool { - return (self.deaths & (1 << unreferenced_bit_index)) != 0; - } - - pub fn operandDies(self: Inst, index: DeathsBitIndex) bool { - assert(index < deaths_bits); - return @truncate(u1, self.deaths >> index) != 0; - } - - pub fn clearOperandDeath(self: *Inst, index: DeathsBitIndex) void { - assert(index < deaths_bits); - self.deaths &= ~(@as(DeathsInt, 1) << index); - } - pub fn specialOperandDeaths(self: Inst) bool { return (self.deaths & (1 << deaths_bits)) != 0; } - pub fn operandCount(base: *Inst) usize { - inline for (@typeInfo(Tag).Enum.fields) |field| { - const tag = @intToEnum(Tag, field.value); - if (tag == base.tag) { - return @fieldParentPtr(tag.Type(), "base", base).operandCount(); - } - } - unreachable; - } - - pub fn getOperand(base: *Inst, index: usize) ?*Inst { - inline for (@typeInfo(Tag).Enum.fields) |field| { - const tag = @intToEnum(Tag, field.value); - if (tag == base.tag) { - return @fieldParentPtr(tag.Type(), "base", base).getOperand(index); - } - } - unreachable; - } - - pub fn Args(comptime T: type) type { - return std.meta.fieldInfo(T, .args).field_type; - } - /// Returns `null` if runtime-known. /// Should be called by codegen, not by Sema. Sema functions should call /// `resolvePossiblyUndefinedValue` or `resolveDefinedValue` instead.
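A worked example of the tomb-bit indexing used by the Liveness helpers added below (a sketch assuming a 64-bit host, where @bitSizeOf(usize) == 64, so with bpi == 4 each usize packs 16 instructions):

    // inst = 18: usize_index = (18 * 4) / 64 = 1
    //            bit offset  = (18 % 16) * 4 = 8
    // operandDies(l, 18, 0) therefore tests bit 8 of l.tomb_bits[1], and
    // isUnused(l, 18) tests bit 8 + (bpi - 1) = bit 11.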
diff --git a/src/Liveness.zig b/src/Liveness.zig index 84e2495054..0cbac61118 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -74,6 +74,26 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { }; } +pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { + const usize_index = (inst * bpi) / @bitSizeOf(usize); + const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); + return (l.tomb_bits[usize_index] & mask) != 0; +} + +pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool { + assert(operand < bpi - 1); + const usize_index = (inst * bpi) / @bitSizeOf(usize); + const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + return (l.tomb_bits[usize_index] & mask) != 0; +} + +pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { + assert(operand < bpi - 1); + const usize_index = (inst * bpi) / @bitSizeOf(usize); + const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + l.tomb_bits[usize_index] &= ~mask; +} + pub fn deinit(l: *Liveness, gpa: *Allocator) void { gpa.free(l.tomb_bits); gpa.free(l.extra); @@ -83,6 +103,7 @@ pub fn deinit(l: *Liveness, gpa: *Allocator) void { /// How many tomb bits per AIR instruction. const bpi = 4; const Bpi = std.meta.Int(.unsigned, bpi); +const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. const Analysis = struct { diff --git a/src/codegen.zig b/src/codegen.zig index 91b0401291..65e85702e5 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -722,16 +722,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - for (body.instructions) |inst| { - try self.ensureProcessDeathCapacity(@popCount(@TypeOf(inst.deaths), inst.deaths)); + for (body) |inst| { + const tomb_bits = self.liveness.getTombBits(inst); + try self.ensureProcessDeathCapacity(@popCount(@TypeOf(tomb_bits), tomb_bits)); const mcv = try self.genFuncInst(inst); - if (!inst.isUnused()) { - log.debug("{*} => {}", .{ inst, mcv }); + if (!self.liveness.isUnused(inst)) { + log.debug("{} => {}", .{ inst, mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.putNoClobber(self.gpa, inst, mcv); } + // TODO inline this logic into every instruction var i: ir.Inst.DeathsBitIndex = 0; while (inst.getOperand(i)) |operand| : (i += 1) { if (inst.operandDies(i)) @@ -785,8 +787,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } /// Asserts there is already capacity to insert into top branch inst_table. - fn processDeath(self: *Self, inst: *ir.Inst) void { - if (inst.tag == .constant) return; // Constants are immortal. + fn processDeath(self: *Self, inst: Air.Inst.Index) void { + const air_tags = self.air.instructions.items(.tag); + if (air_tags[inst] == .constant) return; // Constants are immortal. // When editing this function, note that the logic must synchronize with `reuseOperand`.
const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -827,74 +830,82 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genFuncInst(self: *Self, inst: *ir.Inst) !MCValue { - switch (inst.tag) { - .add => return self.genAdd(inst.castTag(.add).?), + fn genFuncInst(self: *Self, inst: Air.Inst.Index) !MCValue { + const air_tags = self.air.instructions.items(.tag); + switch (air_tags[inst]) { + // zig fmt: off + .add => return self.genAdd(inst.castTag(.add).?), .addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - .alloc => return self.genAlloc(inst.castTag(.alloc).?), - .arg => return self.genArg(inst.castTag(.arg).?), - .assembly => return self.genAsm(inst.castTag(.assembly).?), - .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - .block => return self.genBlock(inst.castTag(.block).?), - .br => return self.genBr(inst.castTag(.br).?), - .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - .breakpoint => return self.genBreakpoint(inst.src), - .br_void => return self.genBrVoid(inst.castTag(.br_void).?), - .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - .call => return self.genCall(inst.castTag(.call).?), - .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), + .sub => return self.genSub(inst.castTag(.sub).?), + .subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), + .mul => return self.genMul(inst.castTag(.mul).?), + .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), + .div => return self.genDiv(inst.castTag(.div).?), + + .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), + .cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), + .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), .cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), - .condbr => return self.genCondBr(inst.castTag(.condbr).?), - .constant => unreachable, // excluded from function bodies - .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - .intcast => return self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), + + .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), + .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), + .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), + .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), + .xor => return self.genXor(inst.castTag(.xor).?), + + .alloc => return self.genAlloc(inst.castTag(.alloc).?), + .arg => return self.genArg(inst.castTag(.arg).?), + .assembly => return self.genAsm(inst.castTag(.assembly).?), + .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), + .block => return self.genBlock(inst.castTag(.block).?), + .br => return self.genBr(inst.castTag(.br).?), + .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), + .breakpoint => return self.genBreakpoint(inst.src), + .call => return self.genCall(inst.castTag(.call).?), + 
.cond_br => return self.genCondBr(inst.castTag(.condbr).?), + .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), + .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), + .intcast => return self.genIntCast(inst.castTag(.intcast).?), + .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - .is_null => return self.genIsNull(inst.castTag(.is_null).?), - .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - .is_err => return self.genIsErr(inst.castTag(.is_err).?), - .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - .load => return self.genLoad(inst.castTag(.load).?), - .loop => return self.genLoop(inst.castTag(.loop).?), - .not => return self.genNot(inst.castTag(.not).?), - .mul => return self.genMul(inst.castTag(.mul).?), - .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - .div => return self.genDiv(inst.castTag(.div).?), - .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - .ref => return self.genRef(inst.castTag(.ref).?), - .ret => return self.genRet(inst.castTag(.ret).?), - .retvoid => return self.genRetVoid(inst.castTag(.retvoid).?), - .store => return self.genStore(inst.castTag(.store).?), - .struct_field_ptr => return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .sub => return self.genSub(inst.castTag(.sub).?), - .subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - .switchbr => return self.genSwitch(inst.castTag(.switchbr).?), - .unreach => return MCValue{ .unreach = {} }, - .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - .unwrap_errunion_payload_ptr => return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), + .is_null => return self.genIsNull(inst.castTag(.is_null).?), + .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), + .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), + .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), + .is_err => return self.genIsErr(inst.castTag(.is_err).?), + .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), + .load => return self.genLoad(inst.castTag(.load).?), + .loop => return self.genLoop(inst.castTag(.loop).?), + .not => return self.genNot(inst.castTag(.not).?), + .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), + .ref => return self.genRef(inst.castTag(.ref).?), + .ret => return self.genRet(inst.castTag(.ret).?), + .store => return self.genStore(inst.castTag(.store).?), + .struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + .switchbr => return self.genSwitch(inst.castTag(.switchbr).?), + .varptr => return self.genVarPtr(inst.castTag(.varptr).?), + + .constant => unreachable, // excluded from function 
bodies + .unreach => return MCValue{ .unreach = {} }, + + .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), + .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), + .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), + .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), + .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), + .unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), + + .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), .wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), - .varptr => return self.genVarPtr(inst.castTag(.varptr).?), - .xor => return self.genXor(inst.castTag(.xor).?), + .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), + + // zig fmt: on } } - fn allocMem(self: *Self, inst: *ir.Inst, abi_size: u32, abi_align: u32) !u32 { + fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 { if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending @@ -910,20 +921,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } /// Use a pointer instruction as the basis for allocating stack memory. - fn allocMemPtr(self: *Self, inst: *ir.Inst) !u32 { - const elem_ty = inst.ty.elemType(); + fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { + const elem_ty = self.air.getType(inst).elemType(); const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail(inst.src, "type '{}' too big to fit into stack frame", .{elem_ty}); + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); return self.allocMem(inst, abi_size, abi_align); } - fn allocRegOrMem(self: *Self, inst: *ir.Inst, reg_ok: bool) !MCValue { - const elem_ty = inst.ty; + fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { + const elem_ty = self.air.getType(inst); const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail(inst.src, "type '{}' too big to fit into stack frame", .{elem_ty}); + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; const abi_align = elem_ty.abiAlignment(self.target.*); if (abi_align > self.stack_align) @@ -943,72 +954,75 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return MCValue{ .stack_offset = stack_offset }; } - pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: Register, inst: *ir.Inst) !void { + pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { const stack_mcv = try self.allocRegOrMem(inst, false); - log.debug("spilling {*} to stack mcv {any}", .{ inst, stack_mcv }); + log.debug("spilling {} to stack mcv {any}", .{ inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); assert(reg == toCanonicalReg(reg_mcv.register)); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(src, inst.ty, stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.air.getType(inst), stack_mcv.stack_offset, reg_mcv); } /// Copies a value to a register without tracking the register.
The register is not considered /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. - fn copyToTmpRegister(self: *Self, src: LazySrcLoc, ty: Type, mcv: MCValue) !Register { + fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { const reg = try self.register_manager.allocReg(null, &.{}); - try self.genSetReg(src, ty, reg, mcv); + try self.genSetReg(ty, reg, mcv); return reg; } /// Allocates a new register and copies `mcv` into it. /// `reg_owner` is the instruction that gets associated with the register in the register table. /// This can have a side effect of spilling instructions to the stack to free up a register. - fn copyToNewRegister(self: *Self, reg_owner: *ir.Inst, mcv: MCValue) !MCValue { + fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const reg = try self.register_manager.allocReg(reg_owner, &.{}); - try self.genSetReg(reg_owner.src, reg_owner.ty, reg, mcv); + try self.genSetReg(self.air.getType(reg_owner), reg, mcv); return MCValue{ .register = reg }; } - fn genAlloc(self: *Self, inst: *ir.Inst.NoOp) !MCValue { - const stack_offset = try self.allocMemPtr(&inst.base); + fn genAlloc(self: *Self, inst: Air.Inst.Index) !MCValue { + const stack_offset = try self.allocMemPtr(inst); return MCValue{ .ptr_stack_offset = stack_offset }; } - fn genFloatCast(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genFloatCast(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement floatCast for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}), } } - fn genIntCast(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genIntCast(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; - const operand = try self.resolveInst(inst.operand); - const info_a = inst.operand.ty.intInfo(self.target.*); - const info_b = inst.base.ty.intInfo(self.target.*); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_ty = self.air.getType(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); + const info_a = operand_ty.intInfo(self.target.*); + const info_b = self.air.getType(inst).intInfo(self.target.*); if (info_a.signedness != info_b.signedness) - return self.fail(inst.base.src, "TODO gen intcast sign safety in semantic analysis", .{}); + return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); if (info_a.bits == info_b.bits) return operand; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement intCast for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch}), } } - fn genNot(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genNot(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing.

-        fn genNot(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+        fn genNot(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
-            const operand = try self.resolveInst(inst.operand);
+            const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+            const operand = try self.resolveInst(ty_op.operand);
             switch (operand) {
                 .dead => unreachable,
                 .unreach => unreachable,
@@ -1037,216 +1051,209 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             switch (arch) {
                 .x86_64 => {
-                    var imm = ir.Inst.Constant{
-                        .base = .{
-                            .tag = .constant,
-                            .deaths = 0,
-                            .ty = inst.operand.ty,
-                            .src = inst.operand.src,
-                        },
-                        .val = Value.initTag(.bool_true),
-                    };
-                    return try self.genX8664BinMath(&inst.base, inst.operand, &imm.base);
+                    return try self.genX8664BinMath(inst, ty_op.operand, .bool_true);
                 },
                 .arm, .armeb => {
-                    var imm = ir.Inst.Constant{
-                        .base = .{
-                            .tag = .constant,
-                            .deaths = 0,
-                            .ty = inst.operand.ty,
-                            .src = inst.operand.src,
-                        },
-                        .val = Value.initTag(.bool_true),
-                    };
-                    return try self.genArmBinOp(&inst.base, inst.operand, &imm.base, .not);
+                    return try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not);
                 },
-                else => return self.fail(inst.base.src, "TODO implement NOT for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genAdd(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genAdd(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
             switch (arch) {
                 .x86_64 => {
-                    return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs);
+                    return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs);
                 },
-                .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .add),
-                else => return self.fail(inst.base.src, "TODO implement add for {}", .{self.target.cpu.arch}),
+                .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add),
+                else => return self.fail("TODO implement add for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genAddWrap(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genAddWrap(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+            _ = bin_op;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement addwrap for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch}),
             }
         }
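
Note how the rewritten `genNot` no longer synthesizes a whole constant instruction on the stack: the new `Air.Inst.Ref` namespace has well-known values such as `.bool_true`, so the boolean constant can be passed straight to the binary-op helpers, relying on the identity that `!x` is `x ^ true`. A quick demonstration of that identity:

    const std = @import("std");

    pub fn main() void {
        const x = false;
        std.debug.assert((x != true) == !x); // xor on bools is inequality with `true`
        const y: u1 = 0;
        std.debug.assert((y ^ 1) == ~y); // the same trick on a 1-bit integer
    }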

-        fn genMul(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genMul(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
             switch (arch) {
-                .x86_64 => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs),
-                .arm, .armeb => return try self.genArmMul(&inst.base, inst.lhs, inst.rhs),
-                else => return self.fail(inst.base.src, "TODO implement mul for {}", .{self.target.cpu.arch}),
+                .x86_64 => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
+                .arm, .armeb => return try self.genArmMul(inst, bin_op.lhs, bin_op.rhs),
+                else => return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}),
            }
        }

-        fn genMulWrap(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genMulWrap(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+            _ = bin_op;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement mulwrap for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genDiv(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genDiv(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+            _ = bin_op;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement div for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement div for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genBitAnd(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genBitAnd(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
             switch (arch) {
-                .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bit_and),
-                else => return self.fail(inst.base.src, "TODO implement bitwise and for {}", .{self.target.cpu.arch}),
+                .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and),
+                else => return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genBitOr(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genBitOr(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
             switch (arch) {
-                .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bit_or),
-                else => return self.fail(inst.base.src, "TODO implement bitwise or for {}", .{self.target.cpu.arch}),
+                .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or),
+                else => return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genXor(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genXor(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
             switch (arch) {
-                .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .xor),
-                else => return self.fail(inst.base.src, "TODO implement xor for {}", .{self.target.cpu.arch}),
+                .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor),
+                else => return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genOptionalPayload(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+        fn genOptionalPayload(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement .optional_payload for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genOptionalPayloadPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+        fn genOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genUnwrapErrErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+        fn genUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement unwrap error union error for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement unwrap error union error for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genUnwrapErrPayload(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+        fn genUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}),
             }
         }

         // *(E!T) -> E
-        fn genUnwrapErrErrPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+        fn genUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}),
             }
         }

         // *(E!T) -> *T
-        fn genUnwrapErrPayloadPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+        fn genUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genWrapOptional(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
-            const optional_ty = inst.base.ty;
-
+        fn genWrapOptional(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const optional_ty = self.air.getType(inst);
+
             // Optional type is just a boolean true
             if (optional_ty.abiSize(self.target.*) == 1)
                 return MCValue{ .immediate = 1 };

             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement wrap optional for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}),
             }
         }

         /// T to E!T
-        fn genWrapErrUnionPayload(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+        fn genWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}),
             }
         }

         /// E to E!T
-        fn genWrapErrUnionErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+        fn genWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement wrap errunion error for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement wrap errunion error for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genVarPtr(self: *Self, inst: *ir.Inst.VarPtr) !MCValue {
+        fn genVarPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement varptr for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement varptr for {}", .{self.target.cpu.arch}),
             }
         }

-        fn reuseOperand(self: *Self, inst: *ir.Inst, op_index: ir.Inst.DeathsBitIndex, mcv: MCValue) bool {
-            if (!inst.operandDies(op_index))
+        fn reuseOperand(self: *Self, inst: Air.Inst.Index, op_index: u2, mcv: MCValue) bool {
+            if (!self.liveness.operandDies(inst, op_index))
                 return false;

             switch (mcv) {
@@ -1258,16 +1265,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             self.register_manager.registers[index] = inst;
                         }
                     }
-                    log.debug("reusing {} => {*}", .{ reg, inst });
+                    log.debug("reusing {} => {}", .{ reg, inst });
                 },
                 .stack_offset => |off| {
-                    log.debug("reusing stack offset {} => {*}", .{ off, inst });
+                    log.debug("reusing stack offset {} => {}", .{ off, inst });
                 },
                 else => return false,
             }

             // Prevent the operand deaths processing code from deallocating it.
-            inst.clearOperandDeath(op_index);
+            self.liveness.clearOperandDeath(inst, op_index);

             // That makes us responsible for doing the rest of the stuff that processDeath would have done.
             const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1276,22 +1283,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             return true;
         }
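
A hedged sketch of the query behind the new `reuseOperand`: liveness packs a few "tomb" bits per instruction, one per operand, and `operandDies` is a bit test. The exact bit layout below is an assumption for illustration, not the real `Liveness` API:

    const std = @import("std");

    fn operandDies(tomb_bits: []const usize, inst: usize, op_index: u2) bool {
        const bpi = 4; // assumed tomb bits per instruction
        const per_word = @bitSizeOf(usize) / bpi;
        const word = tomb_bits[inst / per_word];
        const shift = (inst % per_word) * bpi + op_index;
        return (word >> @intCast(std.math.Log2Int(usize), shift)) & 1 != 0;
    }

    pub fn main() void {
        const bits = [_]usize{0b0010}; // instruction 0: operand 1 dies
        std.debug.assert(operandDies(&bits, 0, 1));
        std.debug.assert(!operandDies(&bits, 0, 0));
    }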

-        fn genLoad(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
-            const elem_ty = inst.base.ty;
-            if (!elem_ty.hasCodeGenBits())
-                return MCValue.none;
-            const ptr = try self.resolveInst(inst.operand);
-            const is_volatile = inst.operand.ty.isVolatilePtr();
-            if (inst.base.isUnused() and !is_volatile)
-                return MCValue.dead;
-            const dst_mcv: MCValue = blk: {
-                if (self.reuseOperand(&inst.base, 0, ptr)) {
-                    // The MCValue that holds the pointer can be re-used as the value.
-                    break :blk ptr;
-                } else {
-                    break :blk try self.allocRegOrMem(&inst.base, true);
-                }
-            };
+        fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, elem_ty: Type) !void {
             switch (ptr) {
                 .none => unreachable,
                 .undef => unreachable,
@@ -1299,31 +1291,51 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .dead => unreachable,
                 .compare_flags_unsigned => unreachable,
                 .compare_flags_signed => unreachable,
-                .immediate => |imm| try self.setRegOrMem(inst.base.src, elem_ty, dst_mcv, .{ .memory = imm }),
-                .ptr_stack_offset => |off| try self.setRegOrMem(inst.base.src, elem_ty, dst_mcv, .{ .stack_offset = off }),
+                .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
+                .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
                 .ptr_embedded_in_code => |off| {
-                    try self.setRegOrMem(inst.base.src, elem_ty, dst_mcv, .{ .embedded_in_code = off });
+                    try self.setRegOrMem(elem_ty, dst_mcv, .{ .embedded_in_code = off });
                 },
                 .embedded_in_code => {
-                    return self.fail(inst.base.src, "TODO implement loading from MCValue.embedded_in_code", .{});
+                    return self.fail("TODO implement loading from MCValue.embedded_in_code", .{});
                 },
                 .register => {
-                    return self.fail(inst.base.src, "TODO implement loading from MCValue.register", .{});
+                    return self.fail("TODO implement loading from MCValue.register", .{});
                 },
                 .memory => {
-                    return self.fail(inst.base.src, "TODO implement loading from MCValue.memory", .{});
+                    return self.fail("TODO implement loading from MCValue.memory", .{});
                 },
                 .stack_offset => {
-                    return self.fail(inst.base.src, "TODO implement loading from MCValue.stack_offset", .{});
+                    return self.fail("TODO implement loading from MCValue.stack_offset", .{});
                 },
             }
+        }
+
+        fn genLoad(self: *Self, inst: Air.Inst.Index) !MCValue {
+            const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+            const elem_ty = self.air.getType(inst);
+            if (!elem_ty.hasCodeGenBits())
+                return MCValue.none;
+            const ptr = try self.resolveInst(ty_op.operand);
+            const is_volatile = self.air.getType(ty_op.operand).isVolatilePtr();
+            if (self.liveness.isUnused(inst) and !is_volatile)
+                return MCValue.dead;
+            const dst_mcv: MCValue = blk: {
+                if (self.reuseOperand(inst, 0, ptr)) {
+                    // The MCValue that holds the pointer can be re-used as the value.
+                    break :blk ptr;
+                } else {
+                    break :blk try self.allocRegOrMem(inst, true);
+                }
+            };
+            try self.load(dst_mcv, ptr, elem_ty);
+            return dst_mcv;
+        }

-        fn genStore(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
-            const ptr = try self.resolveInst(inst.lhs);
-            const value = try self.resolveInst(inst.rhs);
-            const elem_ty = inst.rhs.ty;
+        fn genStore(self: *Self, inst: Air.Inst.Index) !MCValue {
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+            const ptr = try self.resolveInst(bin_op.lhs);
+            const value = try self.resolveInst(bin_op.rhs);
+            const elem_ty = self.air.getType(bin_op.rhs);
             switch (ptr) {
                 .none => unreachable,
                 .undef => unreachable,
@@ -1332,57 +1344,60 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .compare_flags_unsigned => unreachable,
                 .compare_flags_signed => unreachable,
                 .immediate => |imm| {
-                    try self.setRegOrMem(inst.base.src, elem_ty, .{ .memory = imm }, value);
+                    try self.setRegOrMem(elem_ty, .{ .memory = imm }, value);
                 },
                 .ptr_stack_offset => |off| {
-                    try self.genSetStack(inst.base.src, elem_ty, off, value);
+                    try self.genSetStack(elem_ty, off, value);
                 },
                 .ptr_embedded_in_code => |off| {
-                    try self.setRegOrMem(inst.base.src, elem_ty, .{ .embedded_in_code = off }, value);
+                    try self.setRegOrMem(elem_ty, .{ .embedded_in_code = off }, value);
                 },
                 .embedded_in_code => {
-                    return self.fail(inst.base.src, "TODO implement storing to MCValue.embedded_in_code", .{});
+                    return self.fail("TODO implement storing to MCValue.embedded_in_code", .{});
                 },
                 .register => {
-                    return self.fail(inst.base.src, "TODO implement storing to MCValue.register", .{});
+                    return self.fail("TODO implement storing to MCValue.register", .{});
                 },
                 .memory => {
-                    return self.fail(inst.base.src, "TODO implement storing to MCValue.memory", .{});
+                    return self.fail("TODO implement storing to MCValue.memory", .{});
                 },
                 .stack_offset => {
-                    return self.fail(inst.base.src, "TODO implement storing to MCValue.stack_offset", .{});
+                    return self.fail("TODO implement storing to MCValue.stack_offset", .{});
                 },
             }
             return .none;
         }

-        fn genStructFieldPtr(self: *Self, inst: *ir.Inst.StructFieldPtr) !MCValue {
-            return self.fail(inst.base.src, "TODO implement codegen struct_field_ptr", .{});
+        fn genStructFieldPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
+            const struct_field_ptr = self.air.instructions.items(.data)[inst].struct_field_ptr;
+            _ = struct_field_ptr;
+            return self.fail("TODO implement codegen struct_field_ptr", .{});
         }

-        fn genSub(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genSub(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
             switch (arch) {
-                .x86_64 => {
-                    return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs);
-                },
-                .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .sub),
-                else => return self.fail(inst.base.src, "TODO implement sub for {}", .{self.target.cpu.arch}),
+                .x86_64 => return self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
+                .arm, .armeb => return self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub),
+                else => return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}),
             }
         }

-        fn genSubWrap(self: *Self, inst: *ir.Inst.BinOp) !MCValue {
+        fn genSubWrap(self: *Self, inst: Air.Inst.Index) !MCValue {
             // No side effects, so if it's unreferenced, do nothing.
-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+            _ = bin_op;
             switch (arch) {
-                else => return self.fail(inst.base.src, "TODO implement subwrap for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}),
             }
         }

-        fn armOperandShouldBeRegister(self: *Self, src: LazySrcLoc, mcv: MCValue) !bool {
+        fn armOperandShouldBeRegister(self: *Self, mcv: MCValue) !bool {
             return switch (mcv) {
                 .none => unreachable,
                 .undef => unreachable,
@@ -1392,7 +1407,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .ptr_stack_offset => unreachable,
                 .ptr_embedded_in_code => unreachable,
                 .immediate => |imm| blk: {
-                    if (imm > std.math.maxInt(u32)) return self.fail(src, "TODO ARM binary arithmetic immediate larger than u32", .{});
+                    if (imm > std.math.maxInt(u32)) return self.fail("TODO ARM binary arithmetic immediate larger than u32", .{});

                     // Load immediate into register if it doesn't fit
                     // in an operand
@@ -1406,14 +1421,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             };
         }

-        fn genArmBinOp(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst, op: ir.Inst.Tag) !MCValue {
+        fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: ir.Inst.Tag) !MCValue {
             const lhs = try self.resolveInst(op_lhs);
             const rhs = try self.resolveInst(op_rhs);

             const lhs_is_register = lhs == .register;
             const rhs_is_register = rhs == .register;
-            const lhs_should_be_register = try self.armOperandShouldBeRegister(op_lhs.src, lhs);
-            const rhs_should_be_register = try self.armOperandShouldBeRegister(op_rhs.src, rhs);
+            const lhs_should_be_register = try self.armOperandShouldBeRegister(lhs);
+            const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs);
             const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs);
             const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs);
@@ -1486,14 +1501,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

             // Move the operands to the newly allocated registers
             if (lhs_mcv == .register and !lhs_is_register) {
-                try self.genSetReg(op_lhs.src, op_lhs.ty, lhs_mcv.register, lhs);
+                try self.genSetReg(self.air.getType(op_lhs), lhs_mcv.register, lhs);
             }
             if (rhs_mcv == .register and !rhs_is_register) {
-                try self.genSetReg(op_rhs.src, op_rhs.ty, rhs_mcv.register, rhs);
+                try self.genSetReg(self.air.getType(op_rhs), rhs_mcv.register, rhs);
             }

             try self.genArmBinOpCode(
-                inst.src,
                 dst_mcv.register,
                 lhs_mcv,
                 rhs_mcv,
@@ -1505,14 +1519,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

         fn genArmBinOpCode(
             self: *Self,
-            src: LazySrcLoc,
             dst_reg: Register,
             lhs_mcv: MCValue,
             rhs_mcv: MCValue,
             swap_lhs_and_rhs: bool,
             op: ir.Inst.Tag,
         ) !void {
-            _ = src;
             assert(lhs_mcv == .register or rhs_mcv == .register);

             const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register;
@@ -1561,7 +1573,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             }
         }
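
A sketch of the test behind `armOperandShouldBeRegister`: an ARM data-processing immediate is an 8-bit value rotated right by an even amount, so anything that cannot be encoded that way must first be materialized into a register. This is a simplified stand-in for the real `Instruction` operand helpers:

    const std = @import("std");

    fn fitsArmImmediate(imm: u32) bool {
        var rot: u32 = 0;
        while (rot < 32) : (rot += 2) {
            // Rotating left by `rot` undoes a rotate-right-by-`rot` encoding.
            if (std.math.rotl(u32, imm, rot) <= 0xff) return true;
        }
        return false;
    }

    pub fn main() void {
        std.debug.assert(fitsArmImmediate(0xff00)); // 0xff rotated right by 24
        std.debug.assert(!fitsArmImmediate(0x101)); // spans 9 significant bits
    }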

-        fn genArmMul(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst) !MCValue {
+        fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Index, op_rhs: Air.Inst.Index) !MCValue {
             const lhs = try self.resolveInst(op_lhs);
             const rhs = try self.resolveInst(op_rhs);
@@ -1618,10 +1630,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

             // Move the operands to the newly allocated registers
             if (!lhs_is_register) {
-                try self.genSetReg(op_lhs.src, op_lhs.ty, lhs_mcv.register, lhs);
+                try self.genSetReg(self.air.getType(op_lhs), lhs_mcv.register, lhs);
             }
             if (!rhs_is_register) {
-                try self.genSetReg(op_rhs.src, op_rhs.ty, rhs_mcv.register, rhs);
+                try self.genSetReg(self.air.getType(op_rhs), rhs_mcv.register, rhs);
             }

             writeInt(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32());
@@ -1631,7 +1643,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

         /// Perform "binary" operators, excluding comparisons.
         /// Currently, the following ops are supported:
         /// ADD, SUB, XOR, OR, AND
-        fn genX8664BinMath(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst) !MCValue {
+        fn genX8664BinMath(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue {
             // We'll handle these ops in two steps.
             // 1) Prepare an output location (register or memory)
             //    This location will be the location of the operand that dies (if one exists)
@@ -1654,7 +1666,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             // as the result MCValue.
             var dst_mcv: MCValue = undefined;
             var src_mcv: MCValue = undefined;
-            var src_inst: *ir.Inst = undefined;
+            var src_inst: Air.Inst.Index = undefined;
             if (self.reuseOperand(inst, 0, lhs)) {
                 // LHS dies; use it as the destination.
                 // Both operands cannot be memory.
@@ -1696,20 +1708,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             switch (src_mcv) {
                 .immediate => |imm| {
                     if (imm > math.maxInt(u31)) {
-                        src_mcv = MCValue{ .register = try self.copyToTmpRegister(src_inst.src, Type.initTag(.u64), src_mcv) };
+                        src_mcv = MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.u64), src_mcv) };
                     }
                 },
                 else => {},
             }

             // Now for step 2, we perform the actual op
-            switch (inst.tag) {
+            const air_tags = self.air.instructions.items(.tag);
+            switch (air_tags[inst]) {
                 // TODO: Generate wrapping and non-wrapping versions separately
-                .add, .addwrap => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 0, 0x00),
-                .bool_or, .bit_or => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 1, 0x08),
-                .bool_and, .bit_and => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 4, 0x20),
-                .sub, .subwrap => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 5, 0x28),
-                .xor, .not => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 6, 0x30),
-                .mul, .mulwrap => try self.genX8664Imul(inst.src, inst.ty, dst_mcv, src_mcv),
+                .add, .addwrap => try self.genX8664BinMathCode(self.air.getType(inst), dst_mcv, src_mcv, 0, 0x00),
+                .bool_or, .bit_or => try self.genX8664BinMathCode(self.air.getType(inst), dst_mcv, src_mcv, 1, 0x08),
+                .bool_and, .bit_and => try self.genX8664BinMathCode(self.air.getType(inst), dst_mcv, src_mcv, 4, 0x20),
+                .sub, .subwrap => try self.genX8664BinMathCode(self.air.getType(inst), dst_mcv, src_mcv, 5, 0x28),
+                .xor, .not => try self.genX8664BinMathCode(self.air.getType(inst), dst_mcv, src_mcv, 6, 0x30),
+                .mul, .mulwrap => try self.genX8664Imul(self.air.getType(inst), dst_mcv, src_mcv),

                 else => unreachable,
@@ -1719,16 +1732,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
         }

         /// Wrap over Instruction.encodeInto to translate errors
-        fn encodeX8664Instruction(
-            self: *Self,
-            src: LazySrcLoc,
-            inst: Instruction,
-        ) !void {
+        fn encodeX8664Instruction(self: *Self, inst: Instruction) !void {
             inst.encodeInto(self.code) catch |err| {
                 if (err == error.OutOfMemory)
                     return error.OutOfMemory
                 else
-                    return self.fail(src, "Instruction.encodeInto failed because {s}", .{@errorName(err)});
+                    return self.fail("Instruction.encodeInto failed because {s}", .{@errorName(err)});
             };
         }
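
A toy model of the destination-selection step described in the `genX8664BinMath` comments above: the output location is the location of an operand that dies at this instruction, if one exists, so the result can overwrite it in place. Names and types here are illustrative, not the actual codegen API:

    const std = @import("std");

    const Loc = union(enum) { register: u4, stack_offset: u32 };

    fn chooseDst(dies_lhs: bool, dies_rhs: bool, commutative: bool, lhs: Loc, rhs: Loc, scratch: Loc) Loc {
        if (dies_lhs) return lhs; // overwrite the dying LHS in place
        if (dies_rhs and commutative) return rhs; // swap operands and overwrite RHS
        return scratch; // neither dies: copy LHS to a fresh register first
    }

    pub fn main() void {
        const dst = chooseDst(false, true, true, .{ .register = 1 }, .{ .register = 2 }, .{ .register = 3 });
        std.debug.assert(dst.register == 2);
    }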
@@ -1800,7 +1809,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
         /// d3 /opx | *r/m16/32/64*, CL    (for context, CL is register 1)
         fn genX8664BinMathCode(
             self: *Self,
-            src: LazySrcLoc,
             dst_ty: Type,
             dst_mcv: MCValue,
             src_mcv: MCValue,
@@ -1818,7 +1826,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .register => |dst_reg| {
                     switch (src_mcv) {
                         .none => unreachable,
-                        .undef => try self.genSetReg(src, dst_ty, dst_reg, .undef),
+                        .undef => try self.genSetReg(dst_ty, dst_reg, .undef),
                         .dead, .unreach => unreachable,
                         .ptr_stack_offset => unreachable,
                         .ptr_embedded_in_code => unreachable,
@@ -1872,7 +1880,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             }
                         },
                         .embedded_in_code, .memory => {
-                            return self.fail(src, "TODO implement x86 ADD/SUB/CMP source memory", .{});
+                            return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
                         },
                         .stack_offset => |off| {
                             // register, indirect use mr + 3
@@ -1880,7 +1888,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             const abi_size = dst_ty.abiSize(self.target.*);
                             const adj_off = off + abi_size;
                             if (off > math.maxInt(i32)) {
-                                return self.fail(src, "stack offset too large", .{});
+                                return self.fail("stack offset too large", .{});
                             }
                             const encoder = try X8664Encoder.init(self.code, 7);
                             encoder.rex(.{
@@ -1903,17 +1911,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                            }
                        },
                        .compare_flags_unsigned => {
-                            return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{});
+                            return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{});
                        },
                        .compare_flags_signed => {
-                            return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{});
+                            return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{});
                        },
                    }
                },
                .stack_offset => |off| {
                    switch (src_mcv) {
                        .none => unreachable,
-                        .undef => return self.genSetStack(src, dst_ty, off, .undef),
+                        .undef => return self.genSetStack(dst_ty, off, .undef),
                        .dead, .unreach => unreachable,
                        .ptr_stack_offset => unreachable,
                        .ptr_embedded_in_code => unreachable,
@@ -1922,21 +1930,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                        },
                        .immediate => |imm| {
                            _ = imm;
-                            return self.fail(src, "TODO implement x86 ADD/SUB/CMP source immediate", .{});
+                            return self.fail("TODO implement x86 ADD/SUB/CMP source immediate", .{});
                        },
                        .embedded_in_code, .memory, .stack_offset => {
-                            return self.fail(src, "TODO implement x86 ADD/SUB/CMP source memory", .{});
+                            return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
                        },
                        .compare_flags_unsigned => {
-                            return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{});
+                            return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{});
                        },
                        .compare_flags_signed => {
-                            return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{});
+                            return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{});
                        },
                    }
                },
                .embedded_in_code, .memory => {
-                    return self.fail(src, "TODO implement x86 ADD/SUB/CMP destination memory", .{});
+                    return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{});
                },
            }
        }
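
The `opx` and `mr` parameters above select either an opcode extension or a register number for the ModRM byte that follows the opcode. A minimal sketch of how that byte is laid out (standard x86 encoding, independent of this codebase):

    const std = @import("std");

    // mod (2 bits) | reg or /opx opcode extension (3 bits) | rm (3 bits)
    fn modRm(mod: u2, reg_or_opx: u3, rm: u3) u8 {
        return (@as(u8, mod) << 6) | (@as(u8, reg_or_opx) << 3) | rm;
    }

    pub fn main() void {
        // `add rax, rcx` is REX.W 0x01 /r: mod=0b11 (register direct), reg=rcx(1), rm=rax(0).
        std.debug.assert(modRm(0b11, 1, 0) == 0xC8);
    }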
@@ -1960,7 +1968,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .register => |dst_reg| {
                     switch (src_mcv) {
                         .none => unreachable,
-                        .undef => try self.genSetReg(src, dst_ty, dst_reg, .undef),
+                        .undef => try self.genSetReg(dst_ty, dst_reg, .undef),
                         .dead, .unreach => unreachable,
                         .ptr_stack_offset => unreachable,
                         .ptr_embedded_in_code => unreachable,
@@ -2026,31 +2034,31 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             );
                             encoder.imm32(@intCast(i32, imm));
                         } else {
-                            const src_reg = try self.copyToTmpRegister(src, dst_ty, src_mcv);
+                            const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv);
-                            return self.genX8664Imul(src, dst_ty, dst_mcv, MCValue{ .register = src_reg });
+                            return self.genX8664Imul(dst_ty, dst_mcv, MCValue{ .register = src_reg });
                         }
                     },
                     .embedded_in_code, .memory, .stack_offset => {
-                        return self.fail(src, "TODO implement x86 multiply source memory", .{});
+                        return self.fail("TODO implement x86 multiply source memory", .{});
                     },
                     .compare_flags_unsigned => {
-                        return self.fail(src, "TODO implement x86 multiply source compare flag (unsigned)", .{});
+                        return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{});
                    },
                    .compare_flags_signed => {
-                        return self.fail(src, "TODO implement x86 multiply source compare flag (signed)", .{});
+                        return self.fail("TODO implement x86 multiply source compare flag (signed)", .{});
                    },
                }
            },
            .stack_offset => |off| {
                switch (src_mcv) {
                    .none => unreachable,
-                    .undef => return self.genSetStack(src, dst_ty, off, .undef),
+                    .undef => return self.genSetStack(dst_ty, off, .undef),
                    .dead, .unreach => unreachable,
                    .ptr_stack_offset => unreachable,
                    .ptr_embedded_in_code => unreachable,
                    .register => |src_reg| {
                        // copy dst to a register
-                        const dst_reg = try self.copyToTmpRegister(src, dst_ty, dst_mcv);
+                        const dst_reg = try self.copyToTmpRegister(dst_ty, dst_mcv);
                        // multiply into dst_reg
                        // register, register
                        // Use the following imul opcode
@@ -2068,34 +2076,34 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                            src_reg.low_id(),
                        );
                        // copy dst_reg back out
-                        return self.genSetStack(src, dst_ty, off, MCValue{ .register = dst_reg });
+                        return self.genSetStack(dst_ty, off, MCValue{ .register = dst_reg });
                    },
                    .immediate => |imm| {
                        _ = imm;
-                        return self.fail(src, "TODO implement x86 multiply source immediate", .{});
+                        return self.fail("TODO implement x86 multiply source immediate", .{});
                    },
                    .embedded_in_code, .memory, .stack_offset => {
-                        return self.fail(src, "TODO implement x86 multiply source memory", .{});
+                        return self.fail("TODO implement x86 multiply source memory", .{});
                    },
                    .compare_flags_unsigned => {
-                        return self.fail(src, "TODO implement x86 multiply source compare flag (unsigned)", .{});
+                        return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{});
                    },
                    .compare_flags_signed => {
-                        return self.fail(src, "TODO implement x86 multiply source compare flag (signed)", .{});
+                        return self.fail("TODO implement x86 multiply source compare flag (signed)", .{});
                    },
                }
            },
            .embedded_in_code, .memory => {
-                return self.fail(src, "TODO implement x86 multiply destination memory", .{});
+                return self.fail("TODO implement x86 multiply destination memory", .{});
            },
        }
    }

-        fn genX8664ModRMRegToStack(self: *Self, src: LazySrcLoc, ty: Type, off: u32, reg: Register, opcode: u8) !void {
+        fn genX8664ModRMRegToStack(self: *Self, ty: Type, off: u32, reg: Register, opcode: u8) !void {
             const abi_size = ty.abiSize(self.target.*);
             const adj_off = off + abi_size;
             if (off > math.maxInt(i32)) {
-                return self.fail(src, "stack offset too large", .{});
+                return self.fail("stack offset too large", .{});
             }
             const i_adj_off = -@intCast(i32, adj_off);
@@ -2122,8 +2130,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             }
         }

-        fn genArgDbgInfo(self: *Self, inst: *ir.Inst.Arg, mcv: MCValue) !void {
+        fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void {
             const name_with_null = inst.name[0 .. mem.lenZ(inst.name) + 1];
+            const ty = self.air.getType(inst);

             switch (mcv) {
                 .register => |reg| {
@@ -2136,7 +2145,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             reg.dwarfLocOp(),
                         });
                         try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
-                        try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type,  DW.FORM_ref4
+                        try self.addDbgInfoTypeReloc(ty); // DW.AT_type,  DW.FORM_ref4
                         dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
                     },
                     .none => {},
@@ -2147,12 +2156,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .dwarf => |dbg_out| {
                     switch (arch) {
                         .arm, .armeb => {
-                            const ty = inst.base.ty;
                             const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch {
-                                return self.fail(inst.base.src, "type '{}' too big to fit into stack frame", .{ty});
+                                return self.fail("type '{}' too big to fit into stack frame", .{ty});
                             };
                             const adjusted_stack_offset = math.negateCast(offset + abi_size) catch {
-                                return self.fail(inst.base.src, "Stack offset too large for arguments", .{});
+                                return self.fail("Stack offset too large for arguments", .{});
                             };

                             try dbg_out.dbg_info.append(link.File.Elf.abbrev_parameter);
@@ -2168,7 +2176,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             try leb128.writeILEB128(dbg_out.dbg_info.writer(), adjusted_stack_offset);

                             try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
-                            try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type,  DW.FORM_ref4
+                            try self.addDbgInfoTypeReloc(ty); // DW.AT_type,  DW.FORM_ref4
                             dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
                         },
                         else => {},
@@ -2181,23 +2189,24 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             }
         }

-        fn genArg(self: *Self, inst: *ir.Inst.Arg) !MCValue {
+        fn genArg(self: *Self, inst: Air.Inst.Index) !MCValue {
             const arg_index = self.arg_index;
             self.arg_index += 1;

+            const ty = self.air.getType(inst);
+
             const result = self.args[arg_index];
             const mcv = switch (arch) {
                 // TODO support stack-only arguments on all target architectures
                 .arm, .armeb, .aarch64, .aarch64_32, .aarch64_be => switch (result) {
                     // Copy registers to the stack
                     .register => |reg| blk: {
-                        const ty = inst.base.ty;
                         const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch {
-                            return self.fail(inst.base.src, "type '{}' too big to fit into stack frame", .{ty});
+                            return self.fail("type '{}' too big to fit into stack frame", .{ty});
                         };
                         const abi_align = ty.abiAlignment(self.target.*);
-                        const stack_offset = try self.allocMem(&inst.base, abi_size, abi_align);
-                        try self.genSetStack(inst.base.src, ty, stack_offset, MCValue{ .register = reg });
+                        const stack_offset = try self.allocMem(inst, abi_size, abi_align);
+                        try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });

                         break :blk MCValue{ .stack_offset = stack_offset };
                     },
@@ -2207,12 +2216,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             };

             try self.genArgDbgInfo(inst, mcv);

-            if (inst.base.isUnused())
+            if (self.liveness.isUnused(inst))
                 return MCValue.dead;

             switch (mcv) {
                 .register => |reg| {
-                    self.register_manager.getRegAssumeFree(toCanonicalReg(reg), &inst.base);
+                    self.register_manager.getRegAssumeFree(toCanonicalReg(reg), inst);
                 },
                 else => {},
             }

             return mcv;
         }
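
The DWARF parameter locations above encode stack offsets with signed LEB128 (`leb128.writeILEB128` in the real code, from std). A self-contained sketch of that encoding, for context:

    const std = @import("std");

    fn writeSleb128(writer: anytype, value: i64) !void {
        var v = value;
        while (true) {
            const byte: u8 = @truncate(u7, @bitCast(u64, v));
            v >>= 7; // arithmetic shift preserves the sign
            const done = (v == 0 and byte & 0x40 == 0) or (v == -1 and byte & 0x40 != 0);
            try writer.writeByte(if (done) byte else byte | 0x80);
            if (done) return;
        }
    }

    pub fn main() !void {
        var buf: [4]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try writeSleb128(fbs.writer(), -2);
        std.debug.assert(fbs.getWritten().len == 1 and buf[0] == 0x7e);
    }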

-        fn genBreakpoint(self: *Self, src: LazySrcLoc) !MCValue {
+        fn genBreakpoint(self: *Self) !MCValue {
             switch (arch) {
                 .i386, .x86_64 => {
                     try self.code.append(0xcc); // int3
@@ -2234,13 +2243,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .aarch64 => {
                     mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.brk(1).toU32());
                 },
-                else => return self.fail(src, "TODO implement @breakpoint() for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement @breakpoint() for {}", .{self.target.cpu.arch}),
             }
             return .none;
         }

-        fn genCall(self: *Self, inst: *ir.Inst.Call) !MCValue {
-            var info = try self.resolveCallingConventionValues(inst.base.src, inst.func.ty);
+        fn genCall(self: *Self, inst: Air.Inst.Index) !MCValue {
+            const inst_datas = self.air.instructions.items(.data);
+            const pl_op = inst_datas[inst].pl_op;
+            const fn_ty = self.air.getType(pl_op.operand);
+            const callee = pl_op.operand;
+            const extra = self.air.extraData(Air.Call, pl_op.payload);
+            const args = self.air.extra[extra.end..][0..extra.data.args_len];
+
+            var info = try self.resolveCallingConventionValues(fn_ty);
             defer info.deinit(self);

             // Due to incremental compilation, how function calls are generated depends
@@ -2249,26 +2265,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 switch (arch) {
                     .x86_64 => {
                         for (info.args) |mc_arg, arg_i| {
-                            const arg = inst.args[arg_i];
-                            const arg_mcv = try self.resolveInst(inst.args[arg_i]);
+                            const arg = args[arg_i];
+                            const arg_mcv = try self.resolveInst(args[arg_i]);
                             // Here we do not use setRegOrMem even though the logic is similar, because
                             // the function call will move the stack pointer, so the offsets are different.
                             switch (mc_arg) {
                                 .none => continue,
                                 .register => |reg| {
                                     try self.register_manager.getReg(reg, null);
-                                    try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
+                                    try self.genSetReg(self.air.getType(arg), reg, arg_mcv);
                                 },
                                 .stack_offset => |off| {
                                     // Here we need to emit instructions like this:
                                     // mov     qword ptr [rsp + stack_offset], x
-                                    try self.genSetStack(arg.src, arg.ty, off, arg_mcv);
+                                    try self.genSetStack(self.air.getType(arg), off, arg_mcv);
                                 },
                                 .ptr_stack_offset => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                                 },
                                 .ptr_embedded_in_code => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                                 },
                                 .undef => unreachable,
                                 .immediate => unreachable,
@@ -2281,7 +2297,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             }
                         }

-                        if (inst.func.value()) |func_value| {
+                        if (self.air.value(callee)) |func_value| {
                             if (func_value.castTag(.function)) |func_payload| {
                                 const func = func_payload.data;
@@ -2300,18 +2316,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                 self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
                                 mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
                             } else if (func_value.castTag(.extern_fn)) |_| {
-                                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
+                                return self.fail("TODO implement calling extern functions", .{});
                             } else {
-                                return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
+                                return self.fail("TODO implement calling bitcasted functions", .{});
                             }
                         } else {
-                            return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
+                            return self.fail("TODO implement calling runtime known function pointer", .{});
                         }
                     },
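
A toy model of the `Air.Call` decoding at the top of `genCall`: a fixed header sits at the payload index inside the `extra` array, followed by `args_len` trailing operands. This is a simplified stand-in; the real `extraData` is generic over the header type:

    const std = @import("std");

    const Call = struct { args_len: u32 };

    fn extraData(extra: []const u32, index: usize) struct { data: Call, end: usize } {
        return .{ .data = .{ .args_len = extra[index] }, .end = index + 1 };
    }

    pub fn main() void {
        const extra = [_]u32{ 2, 7, 9 }; // header { args_len = 2 }, then operands 7 and 9
        const call = extraData(&extra, 0);
        const args = extra[call.end..][0..call.data.args_len];
        std.debug.assert(args.len == 2 and args[1] == 9);
    }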
                    .riscv64 => {
-                        if (info.args.len > 0) return self.fail(inst.base.src, "TODO implement fn args for {}", .{self.target.cpu.arch});
+                        if (info.args.len > 0) return self.fail("TODO implement fn args for {}", .{self.target.cpu.arch});

-                        if (inst.func.value()) |func_value| {
+                        if (self.air.value(callee)) |func_value| {
                             if (func_value.castTag(.function)) |func_payload| {
                                 const func = func_payload.data;
@@ -2325,21 +2341,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                 else
                                     unreachable;

-                                try self.genSetReg(inst.base.src, Type.initTag(.usize), .ra, .{ .memory = got_addr });
+                                try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
                                 mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32());
                             } else if (func_value.castTag(.extern_fn)) |_| {
-                                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
+                                return self.fail("TODO implement calling extern functions", .{});
                             } else {
-                                return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
+                                return self.fail("TODO implement calling bitcasted functions", .{});
                             }
                         } else {
-                            return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
+                            return self.fail("TODO implement calling runtime known function pointer", .{});
                         }
                     },
                     .arm, .armeb => {
                         for (info.args) |mc_arg, arg_i| {
-                            const arg = inst.args[arg_i];
-                            const arg_mcv = try self.resolveInst(inst.args[arg_i]);
+                            const arg = args[arg_i];
+                            const arg_mcv = try self.resolveInst(args[arg_i]);

                             switch (mc_arg) {
                                 .none => continue,
@@ -2353,21 +2369,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                 .compare_flags_unsigned => unreachable,
                                 .register => |reg| {
                                     try self.register_manager.getReg(reg, null);
-                                    try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
+                                    try self.genSetReg(self.air.getType(arg), reg, arg_mcv);
                                 },
                                 .stack_offset => {
-                                    return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
+                                    return self.fail("TODO implement calling with parameters in memory", .{});
                                 },
                                 .ptr_stack_offset => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                                 },
                                 .ptr_embedded_in_code => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                                 },
                             }
                         }

-                        if (inst.func.value()) |func_value| {
+                        if (self.air.value(callee)) |func_value| {
                             if (func_value.castTag(.function)) |func_payload| {
                                 const func = func_payload.data;
                                 const ptr_bits = self.target.cpu.arch.ptrBitWidth();
@@ -2380,7 +2396,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                 else
                                     unreachable;

-                                try self.genSetReg(inst.base.src, Type.initTag(.usize), .lr, .{ .memory = got_addr });
+                                try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });

                                 // TODO: add Instruction.supportedOn
                                 // function for ARM
@@ -2391,18 +2407,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                     writeInt(u32, try self.code.addManyAsArray(4), Instruction.bx(.al, .lr).toU32());
                                 }
                             } else if (func_value.castTag(.extern_fn)) |_| {
-                                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
+                                return self.fail("TODO implement calling extern functions", .{});
                             } else {
-                                return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
+                                return self.fail("TODO implement calling bitcasted functions", .{});
                             }
                         } else {
-                            return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
+                            return self.fail("TODO implement calling runtime known function pointer", .{});
                         }
                     },
                    .aarch64 => {
                         for (info.args) |mc_arg, arg_i| {
-                            const arg = inst.args[arg_i];
-                            const arg_mcv = try self.resolveInst(inst.args[arg_i]);
+                            const arg = args[arg_i];
+                            const arg_mcv = try self.resolveInst(args[arg_i]);

                             switch (mc_arg) {
                                 .none => continue,
@@ -2416,21 +2432,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                 .compare_flags_unsigned => unreachable,
                                 .register => |reg| {
                                     try self.register_manager.getReg(reg, null);
-                                    try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
+                                    try self.genSetReg(self.air.getType(arg), reg, arg_mcv);
                                 },
                                 .stack_offset => {
-                                    return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
+                                    return self.fail("TODO implement calling with parameters in memory", .{});
                                 },
                                 .ptr_stack_offset => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                                 },
                                 .ptr_embedded_in_code => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                                 },
                             }
                         }

-                        if (inst.func.value()) |func_value| {
+                        if (self.air.value(callee)) |func_value| {
                             if (func_value.castTag(.function)) |func_payload| {
                                 const func = func_payload.data;
                                 const ptr_bits = self.target.cpu.arch.ptrBitWidth();
@@ -2443,24 +2459,24 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                 else
                                     unreachable;

-                                try self.genSetReg(inst.base.src, Type.initTag(.usize), .x30, .{ .memory = got_addr });
+                                try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });

                                 writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
                             } else if (func_value.castTag(.extern_fn)) |_| {
-                                return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
+                                return self.fail("TODO implement calling extern functions", .{});
                             } else {
-                                return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
+                                return self.fail("TODO implement calling bitcasted functions", .{});
                             }
                         } else {
-                            return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
+                            return self.fail("TODO implement calling runtime known function pointer", .{});
                         }
                     },
-                    else => return self.fail(inst.base.src, "TODO implement call for {}", .{self.target.cpu.arch}),
+                    else => return self.fail("TODO implement call for {}", .{self.target.cpu.arch}),
                 }
             } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                 for (info.args) |mc_arg, arg_i| {
-                    const arg = inst.args[arg_i];
-                    const arg_mcv = try self.resolveInst(inst.args[arg_i]);
+                    const arg = args[arg_i];
+                    const arg_mcv = try self.resolveInst(args[arg_i]);
                     // Here we do not use setRegOrMem even though the logic is similar, because
                     // the function call will move the stack pointer, so the offsets are different.
                    switch (mc_arg) {
@@ -2471,18 +2487,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                .x86_64, .aarch64 => try self.register_manager.getReg(reg, null),
                                else => unreachable,
                            }
-                            try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
+                            try self.genSetReg(self.air.getType(arg), reg, arg_mcv);
                        },
                        .stack_offset => {
                            // Here we need to emit instructions like this:
                            // mov     qword ptr [rsp + stack_offset], x
-                            return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
+                            return self.fail("TODO implement calling with parameters in memory", .{});
                        },
                        .ptr_stack_offset => {
-                            return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+                            return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                        },
                        .ptr_embedded_in_code => {
-                            return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+                            return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                        },
                        .undef => unreachable,
                        .immediate => unreachable,
@@ -2495,7 +2511,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                    }
                }

-                if (inst.func.value()) |func_value| {
+                if (self.air.value(callee)) |func_value| {
                     if (func_value.castTag(.function)) |func_payload| {
                         const func = func_payload.data;
                         const got_addr = blk: {
@@ -2506,13 +2522,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                         log.debug("got_addr = 0x{x}", .{got_addr});
                         switch (arch) {
                             .x86_64 => {
-                                try self.genSetReg(inst.base.src, Type.initTag(.u64), .rax, .{ .memory = got_addr });
+                                try self.genSetReg(Type.initTag(.u64), .rax, .{ .memory = got_addr });
                                 // callq *%rax
                                 try self.code.ensureCapacity(self.code.items.len + 2);
                                 self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 });
                             },
                             .aarch64 => {
-                                try self.genSetReg(inst.base.src, Type.initTag(.u64), .x30, .{ .memory = got_addr });
+                                try self.genSetReg(Type.initTag(.u64), .x30, .{ .memory = got_addr });
                                 // blr x30
                                 writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
                             },
@@ -2552,35 +2568,35 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                        }); // We mark the space and fix it up later.
                    } else {
-                        return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
+                        return self.fail("TODO implement calling bitcasted functions", .{});
                    }
                } else {
-                    return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
+                    return self.fail("TODO implement calling runtime known function pointer", .{});
                }
            } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
                switch (arch) {
                    .x86_64 => {
                        for (info.args) |mc_arg, arg_i| {
-                            const arg = inst.args[arg_i];
-                            const arg_mcv = try self.resolveInst(inst.args[arg_i]);
+                            const arg = args[arg_i];
+                            const arg_mcv = try self.resolveInst(args[arg_i]);
                            // Here we do not use setRegOrMem even though the logic is similar, because
                            // the function call will move the stack pointer, so the offsets are different.
                            switch (mc_arg) {
                                .none => continue,
                                .register => |reg| {
                                    try self.register_manager.getReg(reg, null);
-                                    try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
+                                    try self.genSetReg(self.air.getType(arg), reg, arg_mcv);
                                },
                                .stack_offset => {
                                    // Here we need to emit instructions like this:
                                    // mov     qword ptr [rsp + stack_offset], x
-                                    return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
+                                    return self.fail("TODO implement calling with parameters in memory", .{});
                                },
                                .ptr_stack_offset => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                                },
                                .ptr_embedded_in_code => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                                },
                                .undef => unreachable,
                                .immediate => unreachable,
@@ -2592,7 +2608,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                .compare_flags_unsigned => unreachable,
                            }
                        }
-                        if (inst.func.value()) |func_value| {
+                        if (self.air.value(callee)) |func_value| {
                             if (func_value.castTag(.function)) |func_payload| {
                                 const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                                 const ptr_bytes: u64 = @divExact(ptr_bits, 8);
@@ -2603,9 +2619,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                 self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
                                 const fn_got_addr = got_addr + got_index * ptr_bytes;
                                 mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, fn_got_addr));
-                            } else return self.fail(inst.base.src, "TODO implement calling extern fn on plan9", .{});
+                            } else return self.fail("TODO implement calling extern fn on plan9", .{});
                        } else {
-                            return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
+                            return self.fail("TODO implement calling runtime known function pointer", .{});
                        }
                    },
                    .aarch64 => {
@@ -2628,13 +2644,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                    try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
                                },
                                .stack_offset => {
-                                    return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
+                                    return self.fail("TODO implement calling with parameters in memory", .{});
                                },
                                .ptr_stack_offset => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                                },
                                .ptr_embedded_in_code => {
-                                    return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+                                    return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                                },
                            }
                        }
@@ -2650,15 +2666,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                                    writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
                                } else if (func_value.castTag(.extern_fn)) |_| {
-                                    return self.fail(inst.base.src, "TODO implement calling extern functions", .{});
+                                    return self.fail("TODO implement calling extern functions", .{});
                                } else {
-                                    return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
+                                    return self.fail("TODO implement calling bitcasted functions", .{});
                                }
                            } else {
-                                return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
+                                return self.fail("TODO implement calling runtime known function pointer", .{});
                            }
                        },
-                        else => return self.fail(inst.base.src, "TODO implement call on plan9 for {}", .{self.target.cpu.arch}),
+                        else => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}),
                }
            } else unreachable;
@@ -2666,7 +2682,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                .register => |reg| {
                    if (Register.allocIndex(reg) == null) {
                        // Save function return value in a callee saved register
-                        return try self.copyToNewRegister(&inst.base, info.return_value);
+                        return try self.copyToNewRegister(inst, info.return_value);
                    }
                },
                else => {},
@@ -2675,8 +2691,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
            return info.return_value;
        }

-        fn genRef(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
-            const operand = try self.resolveInst(inst.operand);
+        fn genRef(self: *Self, inst: Air.Inst.Index) !MCValue {
+            if (self.liveness.isUnused(inst))
+                return MCValue.dead;
+            const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+            const operand_ty = self.air.getType(ty_op.operand);
+            const operand = try self.resolveInst(ty_op.operand);
             switch (operand) {
                 .unreach => unreachable,
                 .dead => unreachable,
@@ -2689,8 +2709,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .compare_flags_unsigned,
                 .compare_flags_signed,
                 => {
-                    const stack_offset = try self.allocMemPtr(&inst.base);
-                    try self.genSetStack(inst.base.src, inst.operand.ty, stack_offset, operand);
+                    const stack_offset = try self.allocMemPtr(inst);
+                    try self.genSetStack(operand_ty, stack_offset, operand);
                     return MCValue{ .ptr_stack_offset = stack_offset };
                 },

                 .embedded_in_code => |offset| return MCValue{ .ptr_embedded_in_code = offset },
                 .memory => |vaddr| return MCValue{ .immediate = vaddr },

-                .undef => return self.fail(inst.base.src, "TODO implement ref on an undefined value", .{}),
+                .undef => return self.fail("TODO implement ref on an undefined value", .{}),
             }
         }

-        fn ret(self: *Self, src: LazySrcLoc, mcv: MCValue) !MCValue {
+        fn ret(self: *Self, mcv: MCValue) !MCValue {
             const ret_ty = self.fn_type.fnReturnType();
-            try self.setRegOrMem(src, ret_ty, self.ret_mcv, mcv);
+            try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
             switch (arch) {
                 .i386 => {
                     try self.code.append(0xc3); // ret
@@ -2730,58 +2750,54 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                     try self.code.resize(self.code.items.len + 4);
                     try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4);
                 },
-                else => return self.fail(src, "TODO implement return for {}", .{self.target.cpu.arch}),
+                else => return self.fail("TODO implement return for {}", .{self.target.cpu.arch}),
             }
             return .unreach;
         }

-        fn genRet(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
-            const operand = try self.resolveInst(inst.operand);
-            return self.ret(inst.base.src, operand);
-        }
-
-        fn genRetVoid(self: *Self, inst: *ir.Inst.NoOp) !MCValue {
-            return self.ret(inst.base.src, .none);
-        }
+        fn genRet(self: *Self, inst: Air.Inst.Index) !MCValue {
+            const operand = try self.resolveInst(self.air.instructions.items(.data)[inst].un_op);
+            return self.ret(operand);
+        }
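
For context on the `compare_flags_*` values that `genCmp` below returns: a comparison's result can stay in the CPU flags register until a later instruction forces it into a real register (with `setcc` on x86, for example). A toy of that idea, using names of my own choosing:

    const std = @import("std");

    const CmpResult = union(enum) {
        immediate: u64, // result known at compile time
        flags: std.math.CompareOperator, // still in the flags register; materialize lazily
    };

    pub fn main() void {
        const r = CmpResult{ .flags = .lt };
        switch (r) {
            .flags => |op| std.debug.assert(op == .lt),
            .immediate => unreachable,
        }
    }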
- if (inst.base.isUnused()) - return MCValue{ .dead = {} }; - if (inst.lhs.ty.zigTypeTag() == .ErrorSet or inst.rhs.ty.zigTypeTag() == .ErrorSet) - return self.fail(inst.base.src, "TODO implement cmp for errors", .{}); + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const ty = self.air.getType(bin_op.lhs); + assert(ty.eql(self.air.getType(bin_op.rhs))); + if (ty.zigTypeTag() == .ErrorSet) + return self.fail("TODO implement cmp for errors", .{}); + + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); switch (arch) { .x86_64 => { try self.code.ensureCapacity(self.code.items.len + 8); - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); - // There are 2 operands, destination and source. // Either one, but not both, can be a memory operand. // Source operand can be an immediate, 8 bits or 32 bits. const dst_mcv = if (lhs.isImmediate() or (lhs.isMemory() and rhs.isMemory())) - try self.copyToNewRegister(&inst.base, lhs) + try self.copyToNewRegister(inst, lhs) else lhs; // This instruction supports only signed 32-bit immediates at most. - const src_mcv = try self.limitImmediateType(inst.rhs, i32); + const src_mcv = try self.limitImmediateType(bin_op.rhs, i32); - try self.genX8664BinMathCode(inst.base.src, inst.base.ty, dst_mcv, src_mcv, 7, 0x38); - const info = inst.lhs.ty.intInfo(self.target.*); + try self.genX8664BinMathCode(Type.initTag(.bool), dst_mcv, src_mcv, 7, 0x38); + const info = ty.intInfo(self.target.*); return switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, .arm, .armeb => { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); - const lhs_is_register = lhs == .register; const rhs_is_register = rhs == .register; // lhs should always be a register - const rhs_should_be_register = try self.armOperandShouldBeRegister(inst.rhs.src, rhs); + const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs); var lhs_mcv = lhs; var rhs_mcv = rhs; @@ -2789,49 +2805,55 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate registers if (rhs_should_be_register) { if (!lhs_is_register and !rhs_is_register) { - const regs = try self.register_manager.allocRegs(2, .{ inst.rhs, inst.lhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ bin_op.rhs, bin_op.lhs }, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; } else if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(inst.rhs, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.rhs, &.{}) }; } } if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(inst.lhs, &.{}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.lhs, &.{}) }; } // Move the operands to the newly allocated registers const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; if (lhs_mcv == .register and !lhs_is_register) { - try self.genSetReg(inst.lhs.src, inst.lhs.ty, lhs_mcv.register, lhs); - branch.inst_table.putAssumeCapacity(inst.lhs, lhs); + try self.genSetReg(ty, lhs_mcv.register, lhs); + branch.inst_table.putAssumeCapacity(bin_op.lhs, lhs); } if (rhs_mcv == .register and !rhs_is_register) { - try self.genSetReg(inst.rhs.src, inst.rhs.ty, rhs_mcv.register, rhs); - 
branch.inst_table.putAssumeCapacity(inst.rhs, rhs); + try self.genSetReg(ty, rhs_mcv.register, rhs); + branch.inst_table.putAssumeCapacity(bin_op.rhs, rhs); } // The destination register is not present in the cmp instruction - try self.genArmBinOpCode(inst.base.src, undefined, lhs_mcv, rhs_mcv, false, .cmp_eq); + try self.genArmBinOpCode(undefined, lhs_mcv, rhs_mcv, false, .cmp_eq); - const info = inst.lhs.ty.intInfo(self.target.*); + const info = ty.intInfo(self.target.*); return switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, - else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement cmp for {}", .{self.target.cpu.arch}), } } - fn genDbgStmt(self: *Self, inst: *ir.Inst.DbgStmt) !MCValue { - try self.dbgAdvancePCAndLine(inst.line, inst.column); - assert(inst.base.isUnused()); + fn genDbgStmt(self: *Self, inst: Air.Inst.Index) !MCValue { + const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; + try self.dbgAdvancePCAndLine(dbg_stmt.line, dbg_stmt.column); + assert(self.liveness.isUnused(inst)); return MCValue.dead; } - fn genCondBr(self: *Self, inst: *ir.Inst.CondBr) !MCValue { - const cond = try self.resolveInst(inst.condition); + fn genCondBr(self: *Self, inst: Air.Inst.Index) !MCValue { + const inst_datas = self.air.instructions.items(.data); + const pl_op = inst_datas[inst].pl_op; + const cond = try self.resolveInst(pl_op.operand); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const reloc: Reloc = switch (arch) { .i386, .x86_64 => reloc: { @@ -2880,7 +2902,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { encoder.disp8(1); break :blk 0x84; }, - else => return self.fail(inst.base.src, "TODO implement condbr {s} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), + else => return self.fail("TODO implement condbr {s} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), }; self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode }); const reloc = Reloc{ .rel32 = self.code.items.len }; @@ -2906,7 +2928,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, reg, op).toU32()); break :blk .ne; }, - else => return self.fail(inst.base.src, "TODO implement condbr {} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), + else => return self.fail("TODO implement condbr {} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), }; const reloc = Reloc{ @@ -2918,7 +2940,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.code.resize(self.code.items.len + 4); break :reloc reloc; }, - else => return self.fail(inst.base.src, "TODO implement condbr {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement condbr {}", .{self.target.cpu.arch}), }; // Capture the state of register and stack allocation state so that we can revert to it.
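(Editorial sketch: the condbr lowering above snapshots the register and stack allocation state before emitting the then branch, rolls it back, and then emits the else branch, so both branches are lowered from the same starting state. A minimal standalone Zig illustration of that save/restore discipline follows; the AllocState type and its fields are hypothetical stand-ins, not the backend's real data structures, which track a register manager, per-branch instruction tables, and stack offsets.)

const assert = @import("std").debug.assert;

// Hypothetical condensed allocator state, not the backend's real types.
const AllocState = struct {
    next_stack_offset: u32 = 0,
    free_registers: u16 = 0xffff, // one bit per register, 1 = free
};

pub fn main() void {
    var state = AllocState{};
    const saved = state; // snapshot before lowering the then branch

    // ...then-branch codegen consumes resources...
    state.free_registers &= ~@as(u16, 1); // pretend r0 was allocated
    state.next_stack_offset += 8; // pretend a stack slot was allocated

    state = saved; // revert: the else branch starts from identical state
    assert(state.free_registers == 0xffff and state.next_stack_offset == 0);
}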
@@ -2930,12 +2952,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.branch_stack.append(.{}); - const then_deaths = inst.thenDeaths(); + const then_deaths = self.liveness.thenDeaths(inst); try self.ensureProcessDeathCapacity(then_deaths.len); for (then_deaths) |operand| { self.processDeath(operand); } - try self.genBody(inst.then_body); + try self.genBody(then_body); // Revert to the previous register and stack allocation state. @@ -2951,16 +2973,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.next_stack_offset = parent_next_stack_offset; self.register_manager.free_registers = parent_free_registers; - try self.performReloc(inst.base.src, reloc); + try self.performReloc(reloc); const else_branch = self.branch_stack.addOneAssumeCapacity(); else_branch.* = .{}; - const else_deaths = inst.elseDeaths(); + const else_deaths = self.liveness.elseDeaths(inst); try self.ensureProcessDeathCapacity(else_deaths.len); for (else_deaths) |operand| { self.processDeath(operand); } - try self.genBody(inst.else_body); + try self.genBody(else_body); // At this point, each branch will possibly have conflicting values for where // each instruction is stored. They agree, however, on which instructions are alive/dead. @@ -3003,7 +3025,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating else_entry {*} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(inst.base.src, else_key.ty, canon_mcv, else_value); + try self.setRegOrMem(else_key.ty, canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() + @@ -3031,7 +3053,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating then_entry {*} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(inst.base.src, then_key.ty, parent_mcv, then_value); + try self.setRegOrMem(then_key.ty, parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -3040,58 +3062,155 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return MCValue.unreach; } - fn genIsNull(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn isNull(self: *Self, operand: MCValue) !MCValue { + _ = operand; + // Here you can specialize this instruction if it makes sense to, otherwise the default + // will call isNonNull and invert the result. switch (arch) { - else => return self.fail(inst.base.src, "TODO implement isnull for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO call isNonNull and invert the result", .{}), } } - fn genIsNullPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - return self.fail(inst.base.src, "TODO load the operand and call genIsNull", .{}); - } - - fn genIsNonNull(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn isNonNull(self: *Self, operand: MCValue) !MCValue { + _ = operand; // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call genIsNull and invert the result. + // will call isNull and invert the result. 
switch (arch) { - else => return self.fail(inst.base.src, "TODO call genIsNull and invert the result ", .{}), + else => return self.fail("TODO call isNull and invert the result", .{}), } } - fn genIsNonNullPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - return self.fail(inst.base.src, "TODO load the operand and call genIsNonNull", .{}); + fn isErr(self: *Self, operand: MCValue) !MCValue { + _ = operand; + // Here you can specialize this instruction if it makes sense to, otherwise the default + // will call isNonErr and invert the result. + switch (arch) { + else => return self.fail("TODO call isNonErr and invert the result", .{}), + } } - fn genIsErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn isNonErr(self: *Self, operand: MCValue) !MCValue { + _ = operand; + // Here you can specialize this instruction if it makes sense to, otherwise the default + // will call isErr and invert the result. switch (arch) { - else => return self.fail(inst.base.src, "TODO implement iserr for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO call isErr and invert the result", .{}), } } - fn genIsErrPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - return self.fail(inst.base.src, "TODO load the operand and call genIsErr", .{}); + fn genIsNull(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand = try self.resolveInst(inst_datas[inst].un_op); + return self.isNull(operand); } - fn genIsNonErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - switch (arch) { - else => return self.fail(inst.base.src, "TODO implement is_non_err for {}", .{self.target.cpu.arch}), - } + fn genIsNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr); + return self.isNull(operand); } - fn genIsNonErrPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - return self.fail(inst.base.src, "TODO load the operand and call genIsNonErr", .{}); + fn genIsNonNull(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand = try self.resolveInst(inst_datas[inst].un_op); + return self.isNonNull(operand); } - fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue { + fn genIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value.
+ break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr); + return self.isNonNull(operand); + } + + fn genIsErr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand = try self.resolveInst(inst_datas[inst].un_op); + return self.isErr(operand); + } + + fn genIsErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr); + return self.isErr(operand); + } + + fn genIsNonErr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand = try self.resolveInst(inst_datas[inst].un_op); + return self.isNonErr(operand); + } + + fn genIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr); + return self.isNonErr(operand); + } + + fn genLoop(self: *Self, inst: Air.Inst.Index) !MCValue { // A loop is a setup to be able to jump back to the beginning. + const inst_datas = self.air.instructions.items(.data); + const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; const start_index = self.code.items.len; - try self.genBody(inst.body); - try self.jump(inst.base.src, start_index); + try self.genBody(body); + try self.jump(start_index); return MCValue.unreach; } /// Send control flow to the `index` of `self.code`.
- fn jump(self: *Self, src: LazySrcLoc, index: usize) !void { + fn jump(self: *Self, index: usize) !void { switch (arch) { .i386, .x86_64 => { try self.code.ensureCapacity(self.code.items.len + 5); @@ -3108,21 +3227,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (math.cast(i26, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| { writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(.al, delta).toU32()); } else |_| { - return self.fail(src, "TODO: enable larger branch offset", .{}); + return self.fail("TODO: enable larger branch offset", .{}); } }, .aarch64, .aarch64_be, .aarch64_32 => { if (math.cast(i28, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| { writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(delta).toU32()); } else |_| { - return self.fail(src, "TODO: enable larger branch offset", .{}); + return self.fail("TODO: enable larger branch offset", .{}); } }, - else => return self.fail(src, "TODO implement jump for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement jump for {}", .{self.target.cpu.arch}), } } - fn genBlock(self: *Self, inst: *ir.Inst.Block) !MCValue { + fn genBlock(self: *Self, inst: Air.Inst.Index) !MCValue { try self.blocks.putNoClobber(self.gpa, inst, .{ // A block is a setup to be able to jump to the end. .relocs = .{}, @@ -3136,20 +3255,24 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const block_data = self.blocks.getPtr(inst).?; defer block_data.relocs.deinit(self.gpa); - try self.genBody(inst.body); + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; + try self.genBody(body); - for (block_data.relocs.items) |reloc| try self.performReloc(inst.base.src, reloc); + for (block_data.relocs.items) |reloc| try self.performReloc(reloc); return @bitCast(MCValue, block_data.mcv); } - fn genSwitch(self: *Self, inst: *ir.Inst.SwitchBr) !MCValue { + fn genSwitch(self: *Self, inst: Air.Inst.Index) !MCValue { + _ = inst; switch (arch) { - else => return self.fail(inst.base.src, "TODO genSwitch for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO genSwitch for {}", .{self.target.cpu.arch}), } } - fn performReloc(self: *Self, src: LazySrcLoc, reloc: Reloc) !void { + fn performReloc(self: *Self, reloc: Reloc) !void { switch (reloc) { .rel32 => |pos| { const amt = self.code.items.len - (pos + 4); @@ -3160,7 +3283,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // best place to elide jumps will be in semantic analysis, by inlining blocks that // only have 1 break instruction.
const s32_amt = math.cast(i32, amt) catch - return self.fail(src, "unable to perform relocation: jump too far", .{}); + return self.fail("unable to perform relocation: jump too far", .{}); mem.writeIntLittle(i32, self.code.items[pos..][0..4], s32_amt); }, .arm_branch => |info| { @@ -3170,7 +3293,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (math.cast(i26, amt)) |delta| { writeInt(u32, self.code.items[info.pos..][0..4], Instruction.b(info.cond, delta).toU32()); } else |_| { - return self.fail(src, "TODO: enable larger branch offset", .{}); + return self.fail("TODO: enable larger branch offset", .{}); } }, else => unreachable, // attempting to perform an ARM relocation on a non-ARM target arch @@ -3179,41 +3302,39 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genBrBlockFlat(self: *Self, inst: *ir.Inst.BrBlockFlat) !MCValue { + fn genBrBlockFlat(self: *Self, inst: Air.Inst.Index) !MCValue { try self.genBody(inst.body); const last = inst.body.instructions[inst.body.instructions.len - 1]; - return self.br(inst.base.src, inst.block, last); - } - - fn genBr(self: *Self, inst: *ir.Inst.Br) !MCValue { - return self.br(inst.base.src, inst.block, inst.operand); + return self.br(inst.block, last); } - fn genBrVoid(self: *Self, inst: *ir.Inst.BrVoid) !MCValue { - return self.brVoid(inst.base.src, inst.block); + fn genBr(self: *Self, inst: Air.Inst.Index) !MCValue { + return self.br(inst.block, inst.operand); } - fn genBoolOp(self: *Self, inst: *ir.Inst.BinOp) !MCValue { - if (inst.base.isUnused()) + fn genBoolOp(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const air_tags = self.air.instructions.items(.tag); switch (arch) { - .x86_64 => switch (inst.base.tag) { // lhs AND rhs - .bool_and => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs), // lhs OR rhs - .bool_or => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs), + .x86_64 => switch (air_tags[inst]) { // lhs AND rhs + .bool_and => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), // lhs OR rhs + .bool_or => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), else => unreachable, // Not a boolean operation }, - .arm, .armeb => switch (inst.base.tag) { - .bool_and => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bool_and), - .bool_or => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bool_or), + .arm, .armeb => switch (air_tags[inst]) { + .bool_and => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and), + .bool_or => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or), else => unreachable, // Not a boolean operation }, - else => return self.fail(inst.base.src, "TODO implement boolean operations for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}), } } - fn br(self: *Self, src: LazySrcLoc, block: *ir.Inst.Block, operand: *ir.Inst) !MCValue { + fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Index) !MCValue { const block_data = self.blocks.getPtr(block).?; if (operand.ty.hasCodeGenBits()) { @@ -3222,13 +3343,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (block_mcv == .none) { block_data.mcv = operand_mcv; } else { - try self.setRegOrMem(src, block.base.ty, block_mcv, operand_mcv); + try self.setRegOrMem(block.base.ty, block_mcv, operand_mcv); } } - return self.brVoid(src, block); + return
self.brVoid(block); } - fn brVoid(self: *Self, src: LazySrcLoc, block: *ir.Inst.Block) !MCValue { + fn brVoid(self: *Self, block: Air.Inst.Index) !MCValue { const block_data = self.blocks.getPtr(block).?; // Emit a jump with a relocation. It will be patched up after the block ends. @@ -3252,43 +3373,43 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, }); }, - else => return self.fail(src, "TODO implement brvoid for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement brvoid for {}", .{self.target.cpu.arch}), } return .none; } - fn genAsm(self: *Self, inst: *ir.Inst.Assembly) !MCValue { - if (!inst.is_volatile and inst.base.isUnused()) + fn genAsm(self: *Self, inst: Air.Inst.Index) !MCValue { + if (!inst.is_volatile and self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { .arm, .armeb => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); + return self.fail("unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); } if (mem.eql(u8, inst.asm_source, "svc #0")) { writeInt(u32, try self.code.addManyAsArray(4), Instruction.svc(.al, 0).toU32()); } else { - return self.fail(inst.base.src, "TODO implement support for more arm assembly instructions", .{}); + return self.fail("TODO implement support for more arm assembly instructions", .{}); } if (inst.output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); + return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -3297,16 +3418,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); + return self.fail("unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. 
input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); } if (mem.eql(u8, inst.asm_source, "svc #0")) { @@ -3314,16 +3435,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (mem.eql(u8, inst.asm_source, "svc #0x80")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x80).toU32()); } else { - return self.fail(inst.base.src, "TODO implement support for more aarch64 assembly instructions", .{}); + return self.fail("TODO implement support for more aarch64 assembly instructions", .{}); } if (inst.output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); + return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -3332,31 +3453,31 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .riscv64 => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); + return self.fail("unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); } if (mem.eql(u8, inst.asm_source, "ecall")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ecall.toU32()); } else { - return self.fail(inst.base.src, "TODO implement support for more riscv64 assembly instructions", .{}); + return self.fail("TODO implement support for more riscv64 assembly instructions", .{}); } if (inst.output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); + return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -3365,16 +3486,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64, .i386 => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); + return self.fail("unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); } { @@ -3385,68 +3506,68 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (mem.indexOf(u8, ins, "push")) |_| { const arg = ins[4..]; if (mem.indexOf(u8, arg, "$")) |l| { - const n = std.fmt.parseInt(u8, ins[4 + l + 1 ..], 10) catch return self.fail(inst.base.src, "TODO implement more inline asm int parsing", .{}); + const n = std.fmt.parseInt(u8, ins[4 + l + 1 ..], 10) catch return self.fail("TODO implement more inline asm int parsing", .{}); try self.code.appendSlice(&.{ 0x6a, n }); } else if (mem.indexOf(u8, arg, "%%")) |l| { const reg_name = ins[4 + l + 2 ..]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const low_id: u8 = reg.low_id(); if (reg.isExtended()) { try self.code.appendSlice(&.{ 0x41, 0b1010000 | low_id }); } else { try self.code.append(0b1010000 | low_id); } - } else return self.fail(inst.base.src, "TODO more push operands", .{}); + } else return self.fail("TODO more push operands", .{}); } else if (mem.indexOf(u8, ins, "pop")) |_| { const arg = ins[3..]; if (mem.indexOf(u8, arg, "%%")) |l| { const reg_name = ins[3 + l + 2 ..]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const low_id: u8 = reg.low_id(); if (reg.isExtended()) { try self.code.appendSlice(&.{ 0x41, 0b1011000 | low_id }); } else { try self.code.append(0b1011000 | low_id); } - } else return self.fail(inst.base.src, "TODO more pop operands", .{}); + } else return self.fail("TODO more pop operands", .{}); } else { - return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{}); + return self.fail("TODO implement support for more x86 assembly instructions", .{}); } } } if (inst.output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); + return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; } }, - else => return self.fail(inst.base.src, "TODO implement inline asm support for more architectures", .{}), + else => return self.fail("TODO implement inline asm support for more architectures", .{}), } } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. - fn setRegOrMem(self: *Self, src: LazySrcLoc, ty: Type, loc: MCValue, val: MCValue) !void { + fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { switch (loc) { .none => return, - .register => |reg| return self.genSetReg(src, ty, reg, val), - .stack_offset => |off| return self.genSetStack(src, ty, off, val), + .register => |reg| return self.genSetReg(ty, reg, val), + .stack_offset => |off| return self.genSetStack(ty, off, val), .memory => { - return self.fail(src, "TODO implement setRegOrMem for memory", .{}); + return self.fail("TODO implement setRegOrMem for memory", .{}); }, else => unreachable, } } - fn genSetStack(self: *Self, src: LazySrcLoc, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { + fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { switch (arch) { .arm, .armeb => switch (mcv) { .dead => unreachable, @@ -3458,28 +3579,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. switch (ty.abiSize(self.target.*)) { - 1 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaa }), - 2 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaa }), - 4 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), - 8 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), - else => return self.fail(src, "TODO implement memset", .{}), + 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), + 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), + 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), + 8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + else => return self.fail("TODO implement memset", .{}), } }, .compare_flags_unsigned => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (unsigned)", .{}); }, .compare_flags_signed => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (signed)", .{}); }, .immediate => { - const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, .embedded_in_code => |code_offset| { _ = code_offset; - return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{}); + return self.fail("TODO implement set stack variable from embedded_in_code", .{}); }, .register => |reg| { const abi_size = 
ty.abiSize(self.target.*); @@ -3489,7 +3610,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 1, 4 => { const offset = if (math.cast(u12, adj_off)) |imm| blk: { break :blk Instruction.Offset.imm(imm); - } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); + } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); const str = switch (abi_size) { 1 => Instruction.strb, 4 => Instruction.str, @@ -3504,26 +3625,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 2 => { const offset = if (adj_off <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off })); writeInt(u32, try self.code.addManyAsArray(4), Instruction.strh(.al, reg, .fp, .{ .offset = offset, .positive = false, }).toU32()); }, - else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}), + else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}), } }, .memory => |vaddr| { _ = vaddr; - return self.fail(src, "TODO implement set stack variable from memory vaddr", .{}); + return self.fail("TODO implement set stack variable from memory vaddr", .{}); }, .stack_offset => |off| { if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. - const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, }, .x86_64 => switch (mcv) { @@ -3536,34 +3657,34 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. 
switch (ty.abiSize(self.target.*)) { - 1 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaa }), - 2 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaa }), - 4 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), - 8 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), - else => return self.fail(src, "TODO implement memset", .{}), + 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), + 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), + 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), + 8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + else => return self.fail("TODO implement memset", .{}), } }, .compare_flags_unsigned => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (unsigned)", .{}); }, .compare_flags_signed => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (signed)", .{}); }, .immediate => |x_big| { const abi_size = ty.abiSize(self.target.*); const adj_off = stack_offset + abi_size; if (adj_off > 128) { - return self.fail(src, "TODO implement set stack variable with large stack offset", .{}); + return self.fail("TODO implement set stack variable with large stack offset", .{}); } try self.code.ensureCapacity(self.code.items.len + 8); switch (abi_size) { 1 => { - return self.fail(src, "TODO implement set abi_size=1 stack variable with immediate", .{}); + return self.fail("TODO implement set abi_size=1 stack variable with immediate", .{}); }, 2 => { - return self.fail(src, "TODO implement set abi_size=2 stack variable with immediate", .{}); + return self.fail("TODO implement set abi_size=2 stack variable with immediate", .{}); }, 4 => { const x = @intCast(u32, x_big); @@ -3596,22 +3717,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.appendSliceAssumeCapacity(buf[0..4]); }, else => { - return self.fail(src, "TODO implement set abi_size=large stack variable with immediate", .{}); + return self.fail("TODO implement set abi_size=large stack variable with immediate", .{}); }, } }, .embedded_in_code => { // TODO this and `.stack_offset` below need to get improved to support types greater than // register size, and do general memcpy - const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, .register => |reg| { - try self.genX8664ModRMRegToStack(src, ty, stack_offset, reg, 0x89); + try self.genX8664ModRMRegToStack(ty, stack_offset, reg, 0x89); }, .memory => |vaddr| { _ = vaddr; - return self.fail(src, "TODO implement set stack variable from memory vaddr", .{}); + return self.fail("TODO implement set stack variable from memory vaddr", .{}); }, .stack_offset => |off| { // TODO this and `.embedded_in_code` above need to get improved to support types greater than @@ -3620,8 +3741,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (stack_offset == off) return; // Copy stack variable to itself; nothing to do.
- const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, }, .aarch64, .aarch64_be, .aarch64_32 => switch (mcv) { @@ -3634,28 +3755,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. switch (ty.abiSize(self.target.*)) { - 1 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaa }), - 2 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaa }), - 4 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), - 8 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), - else => return self.fail(src, "TODO implement memset", .{}), + 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), + 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), + 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), + 8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + else => return self.fail("TODO implement memset", .{}), } }, .compare_flags_unsigned => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (unsigned)", .{}); }, .compare_flags_signed => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (signed)", .{}); }, .immediate => { - const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, .embedded_in_code => |code_offset| { _ = code_offset; - return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{}); + return self.fail("TODO implement set stack variable from embedded_in_code", .{}); }, .register => |reg| { const abi_size = ty.abiSize(self.target.*); @@ -3666,7 +3787,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const offset = if (math.cast(i9, adj_off)) |imm| Instruction.LoadStoreOffset.imm_post_index(-imm) else |_| - Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u64), MCValue{ .immediate = adj_off })); + Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u64), MCValue{ .immediate = adj_off })); const rn: Register = switch (arch) { .aarch64, .aarch64_be => .x29, .aarch64_32 => .w29, @@ -3683,26 +3804,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .offset = offset, }).toU32()); }, - else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}), + else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}), } }, .memory => |vaddr| { _ = vaddr; - return self.fail(src, "TODO implement set stack variable from memory vaddr", .{}); + return self.fail("TODO implement set stack variable from memory vaddr", .{}); }, .stack_offset => |off| { if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. 
- const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, }, - else => return self.fail(src, "TODO implement getSetStack for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement getSetStack for {}", .{self.target.cpu.arch}), } } - fn genSetReg(self: *Self, src: LazySrcLoc, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { switch (arch) { .arm, .armeb => switch (mcv) { .dead => unreachable, @@ -3713,7 +3834,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (!self.wantSafety()) return; // The already existing value will do just fine. // Write the debug undefined value. - return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaa }); + return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }); }, .compare_flags_unsigned, .compare_flags_signed, @@ -3732,7 +3853,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(condition, reg, one).toU32()); }, .immediate => |x| { - if (x > math.maxInt(u32)) return self.fail(src, "ARM registers are 32-bit wide", .{}); + if (x > math.maxInt(u32)) return self.fail("ARM registers are 32-bit wide", .{}); if (Instruction.Operand.fromU32(@intCast(u32, x))) |op| { writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, op).toU32()); @@ -3778,7 +3899,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .memory => |addr| { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
- try self.genSetReg(src, ty, reg, .{ .immediate = addr }); + try self.genSetReg(ty, reg, .{ .immediate = addr }); writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, reg, .{ .offset = Instruction.Offset.none }).toU32()); }, .stack_offset => |unadjusted_off| { @@ -3790,7 +3911,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 1, 4 => { const offset = if (adj_off <= math.maxInt(u12)) blk: { break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); const ldr = switch (abi_size) { 1 => Instruction.ldrb, 4 => Instruction.ldr, @@ -3805,17 +3926,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 2 => { const offset = if (adj_off <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off })); writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldrh(.al, reg, .fp, .{ .offset = offset, .positive = false, }).toU32()); }, - else => return self.fail(src, "TODO a type of size {} is not allowed in a register", .{abi_size}), + else => return self.fail("TODO a type of size {} is not allowed in a register", .{abi_size}), } }, - else => return self.fail(src, "TODO implement getSetReg for arm {}", .{mcv}), + else => return self.fail("TODO implement getSetReg for arm {}", .{mcv}), }, .aarch64 => switch (mcv) { .dead => unreachable, @@ -3827,8 +3948,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // Write the debug undefined value. switch (reg.size()) { - 32 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaa }), - 64 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + 32 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }), + 64 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), else => unreachable, // unexpected register size } }, @@ -3876,7 +3997,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .size = 4, }); } else { - return self.fail(src, "TODO implement genSetReg for PIE GOT indirection on this platform", .{}); + return self.fail("TODO implement genSetReg for PIE GOT indirection on this platform", .{}); } mem.writeIntLittle( u32, @@ -3893,7 +4014,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
- try self.genSetReg(src, Type.initTag(.usize), reg, .{ .immediate = addr }); + try self.genSetReg(Type.initTag(.usize), reg, .{ .immediate = addr }); mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .register = .{ .rn = reg } }).toU32()); } }, @@ -3911,7 +4032,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const offset = if (math.cast(i9, adj_off)) |imm| Instruction.LoadStoreOffset.imm_post_index(-imm) else |_| - Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u64), MCValue{ .immediate = adj_off })); + Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u64), MCValue{ .immediate = adj_off })); switch (abi_size) { 1, 2 => { @@ -3931,10 +4052,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .offset = offset, } }).toU32()); }, - else => return self.fail(src, "TODO implement genSetReg other types abi_size={}", .{abi_size}), + else => return self.fail("TODO implement genSetReg other types abi_size={}", .{abi_size}), } }, - else => return self.fail(src, "TODO implement genSetReg for aarch64 {}", .{mcv}), + else => return self.fail("TODO implement genSetReg for aarch64 {}", .{mcv}), }, .riscv64 => switch (mcv) { .dead => unreachable, @@ -3945,7 +4066,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (!self.wantSafety()) return; // The already existing value will do just fine. // Write the debug undefined value. - return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); + return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, .immediate => |unsigned_x| { const x = @bitCast(i64, unsigned_x); @@ -3965,19 +4086,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // li rd, immediate // "Myriad sequences" - return self.fail(src, "TODO genSetReg 33-64 bit immediates for riscv64", .{}); // glhf + return self.fail("TODO genSetReg 33-64 bit immediates for riscv64", .{}); // glhf }, .memory => |addr| { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. - try self.genSetReg(src, ty, reg, .{ .immediate = addr }); + try self.genSetReg(ty, reg, .{ .immediate = addr }); mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ld(reg, 0, reg).toU32()); // LOAD imm=[i12 offset = 0], rs1 = // return self.fail("TODO implement genSetReg memory for riscv64"); }, - else => return self.fail(src, "TODO implement getSetReg for riscv64 {}", .{mcv}), + else => return self.fail("TODO implement getSetReg for riscv64 {}", .{mcv}), }, .x86_64 => switch (mcv) { .dead => unreachable, @@ -3989,10 +4110,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // Write the debug undefined value. 
switch (reg.size()) { - 8 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaa }), - 16 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaa }), - 32 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaa }), - 64 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + 8 => return self.genSetReg(ty, reg, .{ .immediate = 0xaa }), + 16 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaa }), + 32 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }), + 64 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), else => unreachable, } }, @@ -4019,7 +4140,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, .compare_flags_signed => |op| { _ = op; - return self.fail(src, "TODO set register with compare flags value (signed)", .{}); + return self.fail("TODO set register with compare flags value (signed)", .{}); }, .immediate => |x| { // 32-bit moves zero-extend to 64-bit, so xoring the 32-bit @@ -4152,7 +4273,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .size = 4, }); } else { - return self.fail(src, "TODO implement genSetReg for PIE GOT indirection on this platform", .{}); + return self.fail("TODO implement genSetReg for PIE GOT indirection on this platform", .{}); } // MOV reg, [reg] @@ -4208,7 +4329,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { assert(id3 != 4 and id3 != 5); // Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue. - try self.genSetReg(src, ty, reg, MCValue{ .immediate = x }); + try self.genSetReg(ty, reg, MCValue{ .immediate = x }); // Now, the register contains the address of the value to load into it // Currently, we're only allowing 64-bit registers, so we need the `REX.W 8B /r` variant. @@ -4231,7 +4352,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const abi_size = ty.abiSize(self.target.*); const off = unadjusted_off + abi_size; if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) { - return self.fail(src, "stack offset too large", .{}); + return self.fail("stack offset too large", .{}); } const ioff = -@intCast(i32, off); const encoder = try X8664Encoder.init(self.code, 3); @@ -4251,21 +4372,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } }, }, - else => return self.fail(src, "TODO implement getSetReg for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement getSetReg for {}", .{self.target.cpu.arch}), } } - fn genPtrToInt(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - // no-op - return self.resolveInst(inst.operand); + fn genPtrToInt(self: *Self, inst: Air.Inst.Index) !MCValue { + const inst_datas = self.air.instructions.items(.data); + return self.resolveInst(inst_datas[inst].un_op); } - fn genBitCast(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - const operand = try self.resolveInst(inst.operand); - return operand; + fn genBitCast(self: *Self, inst: Air.Inst.Index) !MCValue { + const inst_datas = self.air.instructions.items(.data); + return self.resolveInst(inst_datas[inst].ty_op.operand); } - fn resolveInst(self: *Self, inst: *ir.Inst) !MCValue { + fn resolveInst(self: *Self, inst: Air.Inst.Index) !MCValue { // If the type has no codegen bits, no need to store it. 
if (!inst.ty.hasCodeGenBits()) return MCValue.none; @@ -4283,7 +4404,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.getResolvedInstValue(inst); } - fn getResolvedInstValue(self: *Self, inst: *ir.Inst) MCValue { + fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { // Treat each stack item as a "layer" on top of the previous one. var i: usize = self.branch_stack.items.len; while (true) { @@ -4300,7 +4421,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// A potential opportunity for future optimization here would be keeping track /// of the fact that the instruction is available both as an immediate /// and as a register. - fn limitImmediateType(self: *Self, inst: *ir.Inst, comptime T: type) !MCValue { + fn limitImmediateType(self: *Self, inst: Air.Inst.Index, comptime T: type) !MCValue { const mcv = try self.resolveInst(inst); const ti = @typeInfo(T).Int; switch (mcv) { @@ -4308,7 +4429,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // This immediate is unsigned. const U = std.meta.Int(.unsigned, ti.bits - @boolToInt(ti.signedness == .signed)); if (imm >= math.maxInt(U)) { - return MCValue{ .register = try self.copyToTmpRegister(inst.src, Type.initTag(.usize), mcv) }; + return MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.usize), mcv) }; } }, else => {}, @@ -4334,7 +4455,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { _ = slice_len; _ = ptr_imm; // We need more general support for const data being stored in memory to make this work. - return self.fail(src, "TODO codegen for const slices", .{}); + return self.fail("TODO codegen for const slices", .{}); }, else => { if (typed_value.val.castTag(.decl_ref)) |payload| { @@ -4360,19 +4481,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; return MCValue{ .memory = got_addr }; } else { - return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{}); + return self.fail("TODO codegen non-ELF const Decl pointer", .{}); } } if (typed_value.val.tag() == .int_u64) { return MCValue{ .immediate = typed_value.val.toUnsignedInt() }; } - return self.fail(src, "TODO codegen more kinds of const pointers", .{}); + return self.fail("TODO codegen more kinds of const pointers", .{}); }, }, .Int => { const info = typed_value.ty.intInfo(self.target.*); if (info.bits > ptr_bits or info.signedness == .signed) { - return self.fail(src, "TODO const int bigger than ptr and signed int", .{}); + return self.fail("TODO const int bigger than ptr and signed int", .{}); } return MCValue{ .immediate = typed_value.val.toUnsignedInt() }; }, @@ -4394,9 +4515,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (typed_value.ty.abiSize(self.target.*) == 1) { return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) }; } - return self.fail(src, "TODO non pointer optionals", .{}); + return self.fail("TODO non pointer optionals", .{}); }, - else => return self.fail(src, "TODO implement const of type '{}'", .{typed_value.ty}), + else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}), } } @@ -4413,7 +4534,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; /// Caller must call `CallMCValues.deinit`. 
- fn resolveCallingConventionValues(self: *Self, src: LazySrcLoc, fn_ty: Type) !CallMCValues { + fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const cc = fn_ty.fnCallingConvention(); const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); defer self.gpa.free(param_types); @@ -4482,7 +4603,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.stack_byte_count = next_stack_offset; result.stack_align = 16; }, - else => return self.fail(src, "TODO implement function parameters for {} on x86_64", .{cc}), + else => return self.fail("TODO implement function parameters for {} on x86_64", .{cc}), } }, .arm, .armeb => { @@ -4509,10 +4630,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; ncrn += 1; } else { - return self.fail(src, "TODO MCValues with multiple registers", .{}); + return self.fail("TODO MCValues with multiple registers", .{}); } } else if (ncrn < 4 and nsaa == 0) { - return self.fail(src, "TODO MCValues split between registers and stack", .{}); + return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; if (ty.abiAlignment(self.target.*) == 8) @@ -4526,7 +4647,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.stack_byte_count = nsaa; result.stack_align = 4; }, - else => return self.fail(src, "TODO implement function parameters for {} on arm", .{cc}), + else => return self.fail("TODO implement function parameters for {} on arm", .{cc}), } }, .aarch64 => { @@ -4557,10 +4678,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; ncrn += 1; } else { - return self.fail(src, "TODO MCValues with multiple registers", .{}); + return self.fail("TODO MCValues with multiple registers", .{}); } } else if (ncrn < 8 and nsaa == 0) { - return self.fail(src, "TODO MCValues split between registers and stack", .{}); + return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 8; // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided @@ -4579,11 +4700,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.stack_byte_count = nsaa; result.stack_align = 16; }, - else => return self.fail(src, "TODO implement function parameters for {} on aarch64", .{cc}), + else => return self.fail("TODO implement function parameters for {} on aarch64", .{cc}), } }, else => if (param_types.len != 0) - return self.fail(src, "TODO implement codegen parameters for {}", .{self.target.cpu.arch}), + return self.fail("TODO implement codegen parameters for {}", .{self.target.cpu.arch}), } if (ret_ty.zigTypeTag() == .NoReturn) { @@ -4598,7 +4719,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const aliased_reg = registerAlias(c_abi_int_return_regs[0], ret_ty_size); result.return_value = .{ .register = aliased_reg }; }, - else => return self.fail(src, "TODO implement function return values for {}", .{cc}), + else => return self.fail("TODO implement function return values for {}", .{cc}), }, .arm, .armeb => switch (cc) { .Naked => unreachable, @@ -4607,10 +4728,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (ret_ty_size <= 4) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; } else { - return self.fail(src, "TODO support more return types for ARM backend", .{}); + return self.fail("TODO support more return types for ARM backend", .{}); } }, - else => return self.fail(src, "TODO 
implement function return values for {}", .{cc}), + else => return self.fail("TODO implement function return values for {}", .{cc}), }, .aarch64 => switch (cc) { .Naked => unreachable, @@ -4619,12 +4740,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (ret_ty_size <= 8) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; } else { - return self.fail(src, "TODO support more return types for ARM backend", .{}); + return self.fail("TODO support more return types for ARM backend", .{}); } }, - else => return self.fail(src, "TODO implement function return values for {}", .{cc}), + else => return self.fail("TODO implement function return values for {}", .{cc}), }, - else => return self.fail(src, "TODO implement codegen return values for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement codegen return values for {}", .{self.target.cpu.arch}), } return result; } diff --git a/src/register_manager.zig b/src/register_manager.zig index 9c61423706..8aca7fcc3d 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -147,14 +147,14 @@ pub fn RegisterManager( self.markRegUsed(reg); } else { const spilled_inst = self.registers[index].?; - try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst); + try self.getFunction().spillInstruction(reg, spilled_inst); } self.registers[index] = inst; } else { // Don't track the register if (!self.isRegFree(reg)) { const spilled_inst = self.registers[index].?; - try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst); + try self.getFunction().spillInstruction(reg, spilled_inst); self.freeReg(reg); } } @@ -184,7 +184,7 @@ pub fn RegisterManager( // stack allocation. const spilled_inst = self.registers[index].?; self.registers[index] = tracked_inst; - try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst); + try self.getFunction().spillInstruction(reg, spilled_inst); } else { self.getRegAssumeFree(reg, tracked_inst); } @@ -193,7 +193,7 @@ pub fn RegisterManager( // Move the instruction that was previously there to a // stack allocation. const spilled_inst = self.registers[index].?; - try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst); + try self.getFunction().spillInstruction(reg, spilled_inst); self.freeReg(reg); } } @@ -264,8 +264,7 @@ fn MockFunction(comptime Register: type) type { self.spilled.deinit(self.allocator); } - pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: Register, inst: *ir.Inst) !void { - _ = src; + pub fn spillInstruction(self: *Self, reg: Register, inst: *ir.Inst) !void { _ = inst; try self.spilled.append(self.allocator, reg); } -- cgit v1.2.3 From 0f38f686964664f68e013ec3c63cfe655001f165 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jul 2021 19:51:31 -0700 Subject: stage2: Air and Liveness are passed ephemerally to the link infrastructure, instead of being stored with Module.Fn. This moves towards a strategy to make more efficient use of memory by not storing Air or Liveness data in the Fn struct, but computing it on demand, immediately sending it to the backend, and then immediately freeing it. Backends which want to defer codegen until flush() such as SPIR-V must move the Air/Liveness data upon `updateFunc` being called and keep track of that data in the backend implementation itself. 
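In code, the per-function sequence this enables looks roughly like the following sketch (condensed from the Compilation.zig flow; the Air-producing call is shown as a hypothetical `analyzeFnBody`, and `air.deinit` is assumed to exist as the counterpart of `liveness.deinit`):

    // Sema produces the AIR for one function on demand...
    var air = try module.analyzeFnBody(decl, func); // hypothetical producer
    defer air.deinit(gpa); // assumed to mirror liveness.deinit

    // ...liveness analysis runs immediately on the result...
    var liveness = try Liveness.analyze(gpa, air);
    defer liveness.deinit(gpa);

    // ...and both are handed to the backend ephemerally, then freed.
    // Nothing is retained in Module.Fn.
    try comp.bin_file.updateFunc(module, func, air, liveness);
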
--- BRANCH_TODO | 5 + src/Compilation.zig | 2 +- src/Liveness.zig | 9 +- src/Module.zig | 5 - src/Sema.zig | 762 +++++++++++++++++++++++++------------------------- src/codegen.zig | 7 +- src/codegen/c.zig | 9 +- src/codegen/llvm.zig | 3 + src/codegen/spirv.zig | 3 +- src/codegen/wasm.zig | 88 +++--- src/link.zig | 34 ++- src/link/C.zig | 28 +- src/link/Coff.zig | 56 +++- src/link/Elf.zig | 558 +++++++++++++++++++----------------- src/link/MachO.zig | 55 ++++ src/link/Plan9.zig | 29 +- src/link/SpirV.zig | 24 +- src/link/Wasm.zig | 59 +++- 18 files changed, 1023 insertions(+), 713 deletions(-) (limited to 'src/Liveness.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index 585c8adf44..c7f3923559 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -690,3 +690,8 @@ pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { } } + /// For debugging purposes. + pub fn dump(func: *Fn, mod: Module) void { + ir.dumpFn(mod, func); + } + diff --git a/src/Compilation.zig b/src/Compilation.zig index 74ad7b2aae..90224a77d1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2027,7 +2027,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { - func.dump(module.*); + @panic("TODO implement dumping AIR and liveness"); } assert(decl.ty.hasCodeGenBits()); diff --git a/src/Liveness.zig b/src/Liveness.zig index 0cbac61118..1402a5997b 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -50,7 +50,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { var a: Analysis = .{ .gpa = gpa, - .air = &air, + .air = air, .table = .{}, .tomb_bits = try gpa.alloc( usize, @@ -65,7 +65,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { defer a.table.deinit(gpa); const main_body = air.getMainBody(); - try a.table.ensureTotalCapacity(main_body.len); + try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len)); try analyzeWithContext(&a, null, main_body); return Liveness{ .tomb_bits = a.tomb_bits, @@ -108,9 +108,10 @@ const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. const Analysis = struct { gpa: *Allocator, - air: *const Air, + air: Air, table: std.AutoHashMapUnmanaged(Air.Inst.Index, void), tomb_bits: []usize, + special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { @@ -165,7 +166,7 @@ fn analyzeWithContext( fn analyzeInst( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, ) Allocator.Error!void { const gpa = a.gpa; diff --git a/src/Module.zig b/src/Module.zig index 8971a57487..5972c2bdcf 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -769,11 +769,6 @@ pub const Fn = struct { success, }; - /// For debugging purposes. 
- pub fn dump(func: *Fn, mod: Module) void { - ir.dumpFn(mod, func); - } - pub fn deinit(func: *Fn, gpa: *Allocator) void { if (func.getInferredErrorSet()) |map| { map.deinit(gpa); diff --git a/src/Sema.zig b/src/Sema.zig index d7ec01696f..54c42a482d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -69,7 +69,7 @@ const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); const target_util = @import("target.zig"); -pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index); +pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref); pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; @@ -158,344 +158,344 @@ pub fn analyzeBody( var i: usize = 0; while (true) { const inst = body[i]; - const air_inst = switch (tags[inst]) { + const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .arg => try sema.zirArg(block, inst), - .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), - .alloc_mut => try sema.zirAllocMut(block, inst), - .alloc_comptime => try sema.zirAllocComptime(block, inst), - .anyframe_type => try sema.zirAnyframeType(block, inst), - .array_cat => try sema.zirArrayCat(block, inst), - .array_mul => try sema.zirArrayMul(block, inst), - .array_type => try sema.zirArrayType(block, inst), - .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), - .vector_type => try sema.zirVectorType(block, inst), - .as => try sema.zirAs(block, inst), - .as_node => try sema.zirAsNode(block, inst), - .bit_and => try sema.zirBitwise(block, inst, .bit_and), - .bit_not => try sema.zirBitNot(block, inst), - .bit_or => try sema.zirBitwise(block, inst, .bit_or), - .bitcast => try sema.zirBitcast(block, inst), - .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), - .block => try sema.zirBlock(block, inst), - .suspend_block => try sema.zirSuspendBlock(block, inst), - .bool_not => try sema.zirBoolNot(block, inst), - .bool_and => try sema.zirBoolOp(block, inst, false), - .bool_or => try sema.zirBoolOp(block, inst, true), - .bool_br_and => try sema.zirBoolBr(block, inst, false), - .bool_br_or => try sema.zirBoolBr(block, inst, true), - .c_import => try sema.zirCImport(block, inst), - .call => try sema.zirCall(block, inst, .auto, false), - .call_chkused => try sema.zirCall(block, inst, .auto, true), - .call_compile_time => try sema.zirCall(block, inst, .compile_time, false), - .call_nosuspend => try sema.zirCall(block, inst, .no_async, false), - .call_async => try sema.zirCall(block, inst, .async_kw, false), - .cmp_eq => try sema.zirCmp(block, inst, .eq), - .cmp_gt => try sema.zirCmp(block, inst, .gt), - .cmp_gte => try sema.zirCmp(block, inst, .gte), - .cmp_lt => try sema.zirCmp(block, inst, .lt), - .cmp_lte => try sema.zirCmp(block, inst, .lte), - .cmp_neq => try sema.zirCmp(block, inst, .neq), - .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), - .decl_ref => try sema.zirDeclRef(block, inst), - .decl_val => try sema.zirDeclVal(block, inst), - .load => try sema.zirLoad(block, inst), - .elem_ptr => try sema.zirElemPtr(block, inst), - .elem_ptr_node => try sema.zirElemPtrNode(block, inst), - .elem_val => try sema.zirElemVal(block, inst), - .elem_val_node => try sema.zirElemValNode(block, inst), - .elem_type => try sema.zirElemType(block, inst), - 
.enum_literal => try sema.zirEnumLiteral(block, inst), - .enum_to_int => try sema.zirEnumToInt(block, inst), - .int_to_enum => try sema.zirIntToEnum(block, inst), - .err_union_code => try sema.zirErrUnionCode(block, inst), - .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), - .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), - .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), - .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), - .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), - .error_union_type => try sema.zirErrorUnionType(block, inst), - .error_value => try sema.zirErrorValue(block, inst), - .error_to_int => try sema.zirErrorToInt(block, inst), - .int_to_error => try sema.zirIntToError(block, inst), - .field_ptr => try sema.zirFieldPtr(block, inst), - .field_ptr_named => try sema.zirFieldPtrNamed(block, inst), - .field_val => try sema.zirFieldVal(block, inst), - .field_val_named => try sema.zirFieldValNamed(block, inst), - .func => try sema.zirFunc(block, inst, false), - .func_inferred => try sema.zirFunc(block, inst, true), - .import => try sema.zirImport(block, inst), - .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), - .int => try sema.zirInt(block, inst), - .int_big => try sema.zirIntBig(block, inst), - .float => try sema.zirFloat(block, inst), - .float128 => try sema.zirFloat128(block, inst), - .int_type => try sema.zirIntType(block, inst), - .is_non_err => try sema.zirIsNonErr(block, inst), - .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), - .is_non_null => try sema.zirIsNonNull(block, inst), - .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), - .loop => try sema.zirLoop(block, inst), - .merge_error_sets => try sema.zirMergeErrorSets(block, inst), - .negate => try sema.zirNegate(block, inst, .sub), - .negate_wrap => try sema.zirNegate(block, inst, .subwrap), - .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), - .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), - .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), - .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), - .optional_type => try sema.zirOptionalType(block, inst), - .param_type => try sema.zirParamType(block, inst), - .ptr_type => try sema.zirPtrType(block, inst), - .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), - .ref => try sema.zirRef(block, inst), - .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), - .shl => try sema.zirShl(block, inst), - .shr => try sema.zirShr(block, inst), - .slice_end => try sema.zirSliceEnd(block, inst), - .slice_sentinel => try sema.zirSliceSentinel(block, inst), - .slice_start => try sema.zirSliceStart(block, inst), - .str => try sema.zirStr(block, inst), - .switch_block => try sema.zirSwitchBlock(block, inst, false, .none), - .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), - .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), - .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), - .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), - .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), - .switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), - .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, 
.none), - .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), - .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), - .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), - .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), - .switch_capture => try sema.zirSwitchCapture(block, inst, false, false), - .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), - .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), - .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), - .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), - .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), - .type_info => try sema.zirTypeInfo(block, inst), - .size_of => try sema.zirSizeOf(block, inst), - .bit_size_of => try sema.zirBitSizeOf(block, inst), - .typeof => try sema.zirTypeof(block, inst), - .typeof_elem => try sema.zirTypeofElem(block, inst), - .log2_int_type => try sema.zirLog2IntType(block, inst), - .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), - .xor => try sema.zirBitwise(block, inst, .xor), - .struct_init_empty => try sema.zirStructInitEmpty(block, inst), - .struct_init => try sema.zirStructInit(block, inst, false), - .struct_init_ref => try sema.zirStructInit(block, inst, true), - .struct_init_anon => try sema.zirStructInitAnon(block, inst, false), - .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), - .array_init => try sema.zirArrayInit(block, inst, false), - .array_init_ref => try sema.zirArrayInit(block, inst, true), - .array_init_anon => try sema.zirArrayInitAnon(block, inst, false), - .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), - .union_init_ptr => try sema.zirUnionInitPtr(block, inst), - .field_type => try sema.zirFieldType(block, inst), - .field_type_ref => try sema.zirFieldTypeRef(block, inst), - .ptr_to_int => try sema.zirPtrToInt(block, inst), - .align_of => try sema.zirAlignOf(block, inst), - .bool_to_int => try sema.zirBoolToInt(block, inst), - .embed_file => try sema.zirEmbedFile(block, inst), - .error_name => try sema.zirErrorName(block, inst), - .tag_name => try sema.zirTagName(block, inst), - .reify => try sema.zirReify(block, inst), - .type_name => try sema.zirTypeName(block, inst), - .frame_type => try sema.zirFrameType(block, inst), - .frame_size => try sema.zirFrameSize(block, inst), - .float_to_int => try sema.zirFloatToInt(block, inst), - .int_to_float => try sema.zirIntToFloat(block, inst), - .int_to_ptr => try sema.zirIntToPtr(block, inst), - .float_cast => try sema.zirFloatCast(block, inst), - .int_cast => try sema.zirIntCast(block, inst), - .err_set_cast => try sema.zirErrSetCast(block, inst), - .ptr_cast => try sema.zirPtrCast(block, inst), - .truncate => try sema.zirTruncate(block, inst), - .align_cast => try sema.zirAlignCast(block, inst), - .has_decl => try sema.zirHasDecl(block, inst), - .has_field => try sema.zirHasField(block, inst), - .clz => try sema.zirClz(block, inst), - .ctz => try sema.zirCtz(block, inst), - .pop_count => try sema.zirPopCount(block, inst), - .byte_swap => try sema.zirByteSwap(block, inst), - .bit_reverse => try sema.zirBitReverse(block, inst), - .div_exact => try sema.zirDivExact(block, inst), - .div_floor => try sema.zirDivFloor(block, inst), - .div_trunc => try sema.zirDivTrunc(block, inst), - .mod => try sema.zirMod(block, inst), - 
.rem => try sema.zirRem(block, inst), - .shl_exact => try sema.zirShlExact(block, inst), - .shr_exact => try sema.zirShrExact(block, inst), - .bit_offset_of => try sema.zirBitOffsetOf(block, inst), - .offset_of => try sema.zirOffsetOf(block, inst), - .cmpxchg_strong => try sema.zirCmpxchg(block, inst), - .cmpxchg_weak => try sema.zirCmpxchg(block, inst), - .splat => try sema.zirSplat(block, inst), - .reduce => try sema.zirReduce(block, inst), - .shuffle => try sema.zirShuffle(block, inst), - .atomic_load => try sema.zirAtomicLoad(block, inst), - .atomic_rmw => try sema.zirAtomicRmw(block, inst), - .atomic_store => try sema.zirAtomicStore(block, inst), - .mul_add => try sema.zirMulAdd(block, inst), - .builtin_call => try sema.zirBuiltinCall(block, inst), - .field_ptr_type => try sema.zirFieldPtrType(block, inst), - .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), - .memcpy => try sema.zirMemcpy(block, inst), - .memset => try sema.zirMemset(block, inst), - .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), - .@"resume" => try sema.zirResume(block, inst), - .@"await" => try sema.zirAwait(block, inst, false), - .await_nosuspend => try sema.zirAwait(block, inst, true), - .extended => try sema.zirExtended(block, inst), - - .sqrt => try sema.zirUnaryMath(block, inst), - .sin => try sema.zirUnaryMath(block, inst), - .cos => try sema.zirUnaryMath(block, inst), - .exp => try sema.zirUnaryMath(block, inst), - .exp2 => try sema.zirUnaryMath(block, inst), - .log => try sema.zirUnaryMath(block, inst), - .log2 => try sema.zirUnaryMath(block, inst), - .log10 => try sema.zirUnaryMath(block, inst), - .fabs => try sema.zirUnaryMath(block, inst), - .floor => try sema.zirUnaryMath(block, inst), - .ceil => try sema.zirUnaryMath(block, inst), - .trunc => try sema.zirUnaryMath(block, inst), - .round => try sema.zirUnaryMath(block, inst), - - .opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), - .opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), - .opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), - .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), - .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), - .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), - - .add => try sema.zirArithmetic(block, inst), - .addwrap => try sema.zirArithmetic(block, inst), - .div => try sema.zirArithmetic(block, inst), - .mod_rem => try sema.zirArithmetic(block, inst), - .mul => try sema.zirArithmetic(block, inst), - .mulwrap => try sema.zirArithmetic(block, inst), - .sub => try sema.zirArithmetic(block, inst), - .subwrap => try sema.zirArithmetic(block, inst), - - // Instructions that we know to *always* be noreturn based solely on their tag. - // These functions match the return type of analyzeBody so that we can - // tail call them here. - .break_inline => return inst, - .condbr => return sema.zirCondbr(block, inst), - .@"break" => return sema.zirBreak(block, inst), - .compile_error => return sema.zirCompileError(block, inst), - .ret_coerce => return sema.zirRetCoerce(block, inst, true), - .ret_node => return sema.zirRetNode(block, inst), - .ret_err_value => return sema.zirRetErrValue(block, inst), - .@"unreachable" => return sema.zirUnreachable(block, inst), - .repeat => return sema.zirRepeat(block, inst), - .panic => return sema.zirPanic(block, inst), - // zig fmt: on - - // Instructions that we know can *never* be noreturn based solely on - // their tag. 
We avoid needlessly checking if they are noreturn and - // continue the loop. - // We also know that they cannot be referenced later, so we avoid - // putting them into the map. - .breakpoint => { - try sema.zirBreakpoint(block, inst); - i += 1; - continue; - }, - .fence => { - try sema.zirFence(block, inst); - i += 1; - continue; - }, - .dbg_stmt => { - try sema.zirDbgStmt(block, inst); - i += 1; - continue; - }, - .ensure_err_payload_void => { - try sema.zirEnsureErrPayloadVoid(block, inst); - i += 1; - continue; - }, - .ensure_result_non_error => { - try sema.zirEnsureResultNonError(block, inst); - i += 1; - continue; - }, - .ensure_result_used => { - try sema.zirEnsureResultUsed(block, inst); - i += 1; - continue; - }, - .set_eval_branch_quota => { - try sema.zirSetEvalBranchQuota(block, inst); - i += 1; - continue; - }, - .store => { - try sema.zirStore(block, inst); - i += 1; - continue; - }, - .store_node => { - try sema.zirStoreNode(block, inst); - i += 1; - continue; - }, - .store_to_block_ptr => { - try sema.zirStoreToBlockPtr(block, inst); - i += 1; - continue; - }, - .store_to_inferred_ptr => { - try sema.zirStoreToInferredPtr(block, inst); - i += 1; - continue; - }, - .resolve_inferred_alloc => { - try sema.zirResolveInferredAlloc(block, inst); - i += 1; - continue; - }, - .validate_struct_init_ptr => { - try sema.zirValidateStructInitPtr(block, inst); - i += 1; - continue; - }, - .validate_array_init_ptr => { - try sema.zirValidateArrayInitPtr(block, inst); - i += 1; - continue; - }, - .@"export" => { - try sema.zirExport(block, inst); - i += 1; - continue; - }, - .set_align_stack => { - try sema.zirSetAlignStack(block, inst); - i += 1; - continue; - }, - .set_cold => { - try sema.zirSetCold(block, inst); - i += 1; - continue; - }, - .set_float_mode => { - try sema.zirSetFloatMode(block, inst); - i += 1; - continue; - }, - .set_runtime_safety => { - try sema.zirSetRuntimeSafety(block, inst); - i += 1; - continue; - }, + //.alloc => try sema.zirAlloc(block, inst), + //.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), + //.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), + //.alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), + //.alloc_mut => try sema.zirAllocMut(block, inst), + //.alloc_comptime => try sema.zirAllocComptime(block, inst), + //.anyframe_type => try sema.zirAnyframeType(block, inst), + //.array_cat => try sema.zirArrayCat(block, inst), + //.array_mul => try sema.zirArrayMul(block, inst), + //.array_type => try sema.zirArrayType(block, inst), + //.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), + //.vector_type => try sema.zirVectorType(block, inst), + //.as => try sema.zirAs(block, inst), + //.as_node => try sema.zirAsNode(block, inst), + //.bit_and => try sema.zirBitwise(block, inst, .bit_and), + //.bit_not => try sema.zirBitNot(block, inst), + //.bit_or => try sema.zirBitwise(block, inst, .bit_or), + //.bitcast => try sema.zirBitcast(block, inst), + //.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), + //.block => try sema.zirBlock(block, inst), + //.suspend_block => try sema.zirSuspendBlock(block, inst), + //.bool_not => try sema.zirBoolNot(block, inst), + //.bool_and => try sema.zirBoolOp(block, inst, false), + //.bool_or => try sema.zirBoolOp(block, inst, true), + //.bool_br_and => try sema.zirBoolBr(block, inst, false), + //.bool_br_or => try sema.zirBoolBr(block, inst, true), + //.c_import => try 
sema.zirCImport(block, inst), + //.call => try sema.zirCall(block, inst, .auto, false), + //.call_chkused => try sema.zirCall(block, inst, .auto, true), + //.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), + //.call_nosuspend => try sema.zirCall(block, inst, .no_async, false), + //.call_async => try sema.zirCall(block, inst, .async_kw, false), + //.cmp_eq => try sema.zirCmp(block, inst, .eq), + //.cmp_gt => try sema.zirCmp(block, inst, .gt), + //.cmp_gte => try sema.zirCmp(block, inst, .gte), + //.cmp_lt => try sema.zirCmp(block, inst, .lt), + //.cmp_lte => try sema.zirCmp(block, inst, .lte), + //.cmp_neq => try sema.zirCmp(block, inst, .neq), + //.coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), + //.decl_ref => try sema.zirDeclRef(block, inst), + //.decl_val => try sema.zirDeclVal(block, inst), + //.load => try sema.zirLoad(block, inst), + //.elem_ptr => try sema.zirElemPtr(block, inst), + //.elem_ptr_node => try sema.zirElemPtrNode(block, inst), + //.elem_val => try sema.zirElemVal(block, inst), + //.elem_val_node => try sema.zirElemValNode(block, inst), + //.elem_type => try sema.zirElemType(block, inst), + //.enum_literal => try sema.zirEnumLiteral(block, inst), + //.enum_to_int => try sema.zirEnumToInt(block, inst), + //.int_to_enum => try sema.zirIntToEnum(block, inst), + //.err_union_code => try sema.zirErrUnionCode(block, inst), + //.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), + //.err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), + //.err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), + //.err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), + //.err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), + //.error_union_type => try sema.zirErrorUnionType(block, inst), + //.error_value => try sema.zirErrorValue(block, inst), + //.error_to_int => try sema.zirErrorToInt(block, inst), + //.int_to_error => try sema.zirIntToError(block, inst), + //.field_ptr => try sema.zirFieldPtr(block, inst), + //.field_ptr_named => try sema.zirFieldPtrNamed(block, inst), + //.field_val => try sema.zirFieldVal(block, inst), + //.field_val_named => try sema.zirFieldValNamed(block, inst), + //.func => try sema.zirFunc(block, inst, false), + //.func_inferred => try sema.zirFunc(block, inst, true), + //.import => try sema.zirImport(block, inst), + //.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), + //.int => try sema.zirInt(block, inst), + //.int_big => try sema.zirIntBig(block, inst), + //.float => try sema.zirFloat(block, inst), + //.float128 => try sema.zirFloat128(block, inst), + //.int_type => try sema.zirIntType(block, inst), + //.is_non_err => try sema.zirIsNonErr(block, inst), + //.is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), + //.is_non_null => try sema.zirIsNonNull(block, inst), + //.is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), + //.loop => try sema.zirLoop(block, inst), + //.merge_error_sets => try sema.zirMergeErrorSets(block, inst), + //.negate => try sema.zirNegate(block, inst, .sub), + //.negate_wrap => try sema.zirNegate(block, inst, .subwrap), + //.optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), + //.optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), + //.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), + //.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), + 
//.optional_type => try sema.zirOptionalType(block, inst), + //.param_type => try sema.zirParamType(block, inst), + //.ptr_type => try sema.zirPtrType(block, inst), + //.ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), + //.ref => try sema.zirRef(block, inst), + //.ret_err_value_code => try sema.zirRetErrValueCode(block, inst), + //.shl => try sema.zirShl(block, inst), + //.shr => try sema.zirShr(block, inst), + //.slice_end => try sema.zirSliceEnd(block, inst), + //.slice_sentinel => try sema.zirSliceSentinel(block, inst), + //.slice_start => try sema.zirSliceStart(block, inst), + //.str => try sema.zirStr(block, inst), + //.switch_block => try sema.zirSwitchBlock(block, inst, false, .none), + //.switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), + //.switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), + //.switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), + //.switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), + //.switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), + //.switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), + //.switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), + //.switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), + //.switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), + //.switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), + //.switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), + //.switch_capture => try sema.zirSwitchCapture(block, inst, false, false), + //.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), + //.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), + //.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), + //.switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), + //.switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), + //.type_info => try sema.zirTypeInfo(block, inst), + //.size_of => try sema.zirSizeOf(block, inst), + //.bit_size_of => try sema.zirBitSizeOf(block, inst), + //.typeof => try sema.zirTypeof(block, inst), + //.typeof_elem => try sema.zirTypeofElem(block, inst), + //.log2_int_type => try sema.zirLog2IntType(block, inst), + //.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), + //.xor => try sema.zirBitwise(block, inst, .xor), + //.struct_init_empty => try sema.zirStructInitEmpty(block, inst), + //.struct_init => try sema.zirStructInit(block, inst, false), + //.struct_init_ref => try sema.zirStructInit(block, inst, true), + //.struct_init_anon => try sema.zirStructInitAnon(block, inst, false), + //.struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), + //.array_init => try sema.zirArrayInit(block, inst, false), + //.array_init_ref => try sema.zirArrayInit(block, inst, true), + //.array_init_anon => try sema.zirArrayInitAnon(block, inst, false), + //.array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), + //.union_init_ptr => try sema.zirUnionInitPtr(block, inst), + //.field_type => try sema.zirFieldType(block, inst), + //.field_type_ref => try sema.zirFieldTypeRef(block, inst), + //.ptr_to_int => try sema.zirPtrToInt(block, inst), + //.align_of => try sema.zirAlignOf(block, inst), + //.bool_to_int => try sema.zirBoolToInt(block, inst), + 
//.embed_file => try sema.zirEmbedFile(block, inst), + //.error_name => try sema.zirErrorName(block, inst), + //.tag_name => try sema.zirTagName(block, inst), + //.reify => try sema.zirReify(block, inst), + //.type_name => try sema.zirTypeName(block, inst), + //.frame_type => try sema.zirFrameType(block, inst), + //.frame_size => try sema.zirFrameSize(block, inst), + //.float_to_int => try sema.zirFloatToInt(block, inst), + //.int_to_float => try sema.zirIntToFloat(block, inst), + //.int_to_ptr => try sema.zirIntToPtr(block, inst), + //.float_cast => try sema.zirFloatCast(block, inst), + //.int_cast => try sema.zirIntCast(block, inst), + //.err_set_cast => try sema.zirErrSetCast(block, inst), + //.ptr_cast => try sema.zirPtrCast(block, inst), + //.truncate => try sema.zirTruncate(block, inst), + //.align_cast => try sema.zirAlignCast(block, inst), + //.has_decl => try sema.zirHasDecl(block, inst), + //.has_field => try sema.zirHasField(block, inst), + //.clz => try sema.zirClz(block, inst), + //.ctz => try sema.zirCtz(block, inst), + //.pop_count => try sema.zirPopCount(block, inst), + //.byte_swap => try sema.zirByteSwap(block, inst), + //.bit_reverse => try sema.zirBitReverse(block, inst), + //.div_exact => try sema.zirDivExact(block, inst), + //.div_floor => try sema.zirDivFloor(block, inst), + //.div_trunc => try sema.zirDivTrunc(block, inst), + //.mod => try sema.zirMod(block, inst), + //.rem => try sema.zirRem(block, inst), + //.shl_exact => try sema.zirShlExact(block, inst), + //.shr_exact => try sema.zirShrExact(block, inst), + //.bit_offset_of => try sema.zirBitOffsetOf(block, inst), + //.offset_of => try sema.zirOffsetOf(block, inst), + //.cmpxchg_strong => try sema.zirCmpxchg(block, inst), + //.cmpxchg_weak => try sema.zirCmpxchg(block, inst), + //.splat => try sema.zirSplat(block, inst), + //.reduce => try sema.zirReduce(block, inst), + //.shuffle => try sema.zirShuffle(block, inst), + //.atomic_load => try sema.zirAtomicLoad(block, inst), + //.atomic_rmw => try sema.zirAtomicRmw(block, inst), + //.atomic_store => try sema.zirAtomicStore(block, inst), + //.mul_add => try sema.zirMulAdd(block, inst), + //.builtin_call => try sema.zirBuiltinCall(block, inst), + //.field_ptr_type => try sema.zirFieldPtrType(block, inst), + //.field_parent_ptr => try sema.zirFieldParentPtr(block, inst), + //.memcpy => try sema.zirMemcpy(block, inst), + //.memset => try sema.zirMemset(block, inst), + //.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), + //.@"resume" => try sema.zirResume(block, inst), + //.@"await" => try sema.zirAwait(block, inst, false), + //.await_nosuspend => try sema.zirAwait(block, inst, true), + //.extended => try sema.zirExtended(block, inst), + + //.sqrt => try sema.zirUnaryMath(block, inst), + //.sin => try sema.zirUnaryMath(block, inst), + //.cos => try sema.zirUnaryMath(block, inst), + //.exp => try sema.zirUnaryMath(block, inst), + //.exp2 => try sema.zirUnaryMath(block, inst), + //.log => try sema.zirUnaryMath(block, inst), + //.log2 => try sema.zirUnaryMath(block, inst), + //.log10 => try sema.zirUnaryMath(block, inst), + //.fabs => try sema.zirUnaryMath(block, inst), + //.floor => try sema.zirUnaryMath(block, inst), + //.ceil => try sema.zirUnaryMath(block, inst), + //.trunc => try sema.zirUnaryMath(block, inst), + //.round => try sema.zirUnaryMath(block, inst), + + //.opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), + //.opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), + //.opaque_decl_func => try sema.zirOpaqueDecl(block, 
inst, .func), + //.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), + //.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), + //.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), + + //.add => try sema.zirArithmetic(block, inst), + //.addwrap => try sema.zirArithmetic(block, inst), + //.div => try sema.zirArithmetic(block, inst), + //.mod_rem => try sema.zirArithmetic(block, inst), + //.mul => try sema.zirArithmetic(block, inst), + //.mulwrap => try sema.zirArithmetic(block, inst), + //.sub => try sema.zirArithmetic(block, inst), + //.subwrap => try sema.zirArithmetic(block, inst), + + //// Instructions that we know to *always* be noreturn based solely on their tag. + //// These functions match the return type of analyzeBody so that we can + //// tail call them here. + //.break_inline => return inst, + //.condbr => return sema.zirCondbr(block, inst), + //.@"break" => return sema.zirBreak(block, inst), + //.compile_error => return sema.zirCompileError(block, inst), + //.ret_coerce => return sema.zirRetCoerce(block, inst, true), + //.ret_node => return sema.zirRetNode(block, inst), + //.ret_err_value => return sema.zirRetErrValue(block, inst), + //.@"unreachable" => return sema.zirUnreachable(block, inst), + //.repeat => return sema.zirRepeat(block, inst), + //.panic => return sema.zirPanic(block, inst), + //// zig fmt: on + + //// Instructions that we know can *never* be noreturn based solely on + //// their tag. We avoid needlessly checking if they are noreturn and + //// continue the loop. + //// We also know that they cannot be referenced later, so we avoid + //// putting them into the map. + //.breakpoint => { + // try sema.zirBreakpoint(block, inst); + // i += 1; + // continue; + //}, + //.fence => { + // try sema.zirFence(block, inst); + // i += 1; + // continue; + //}, + //.dbg_stmt => { + // try sema.zirDbgStmt(block, inst); + // i += 1; + // continue; + //}, + //.ensure_err_payload_void => { + // try sema.zirEnsureErrPayloadVoid(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_non_error => { + // try sema.zirEnsureResultNonError(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_used => { + // try sema.zirEnsureResultUsed(block, inst); + // i += 1; + // continue; + //}, + //.set_eval_branch_quota => { + // try sema.zirSetEvalBranchQuota(block, inst); + // i += 1; + // continue; + //}, + //.store => { + // try sema.zirStore(block, inst); + // i += 1; + // continue; + //}, + //.store_node => { + // try sema.zirStoreNode(block, inst); + // i += 1; + // continue; + //}, + //.store_to_block_ptr => { + // try sema.zirStoreToBlockPtr(block, inst); + // i += 1; + // continue; + //}, + //.store_to_inferred_ptr => { + // try sema.zirStoreToInferredPtr(block, inst); + // i += 1; + // continue; + //}, + //.resolve_inferred_alloc => { + // try sema.zirResolveInferredAlloc(block, inst); + // i += 1; + // continue; + //}, + //.validate_struct_init_ptr => { + // try sema.zirValidateStructInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.validate_array_init_ptr => { + // try sema.zirValidateArrayInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.@"export" => { + // try sema.zirExport(block, inst); + // i += 1; + // continue; + //}, + //.set_align_stack => { + // try sema.zirSetAlignStack(block, inst); + // i += 1; + // continue; + //}, + //.set_cold => { + // try sema.zirSetCold(block, inst); + // i += 1; + // continue; + //}, + //.set_float_mode => { + // try sema.zirSetFloatMode(block, 
inst); + // i += 1; + // continue; + //}, + //.set_runtime_safety => { + // try sema.zirSetRuntimeSafety(block, inst); + // i += 1; + // continue; + //}, // Special case instructions to handle comptime control flow. .repeat_inline => { @@ -505,37 +505,38 @@ pub fn analyzeBody( i = 0; continue; }, - .block_inline => blk: { - // Directly analyze the block body without introducing a new block. - const inst_data = datas[inst].pl_node; - const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); - const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, - .condbr_inline => blk: { - const inst_data = datas[inst].pl_node; - const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; - const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); - const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; - const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); - const inline_body = if (cond.val.toBool()) then_body else else_body; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, + //.block_inline => blk: { + // // Directly analyze the block body without introducing a new block. + // const inst_data = datas[inst].pl_node; + // const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); + // const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + //.condbr_inline => blk: { + // const inst_data = datas[inst].pl_node; + // const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; + // const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); + // const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; + // const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + // const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); + // const inline_body = if (cond.val.toBool()) then_body else else_body; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + else => @panic("TODO remove else prong"), }; - if (air_inst.ty.isNoReturn()) + if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; try map.put(sema.gpa, inst, air_inst); i += 1; @@ -577,18 +578,13 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -/// TODO when we rework AIR memory layout, this function will no longer have a possible error. 
-pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. if (i < Zir.Inst.Ref.typed_value_map.len) { - // TODO when we rework AIR memory layout, this function can be as simple as: - // if (zir_ref < Zir.const_inst_list.len + sema.param_count) - // return zir_ref; - // Until then we allocate memory for a new, mutable `ir.Inst` to match what - // AIR expects. - return sema.mod.constInst(sema.arena, .unneeded, Zir.Inst.Ref.typed_value_map[i]); + // We intentionally map the same indexes to the same values between ZIR and AIR. + return zir_ref; } i -= Zir.Inst.Ref.typed_value_map.len; @@ -1256,7 +1252,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1271,7 +1267,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air.instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; return air_arg; } @@ -7942,6 +7938,18 @@ fn enumFieldSrcLoc( } else unreachable; } +fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = sema.air_instructions.items(.tag); + const air_datas = sema.air_instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { switch (ty.tag()) { .u8 => return .u8_type, diff --git a/src/codegen.zig b/src/codegen.zig index eaf910977e..a6c4b5ad3c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -282,7 +282,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return struct { gpa: *Allocator, - air: *const Air, + air: Air, + liveness: Liveness, bin_file: *link.File, target: *const std.Target, mod_fn: *const Module.Fn, @@ -468,8 +469,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var function = Self{ .gpa = bin_file.allocator, - .air = &air, - .liveness = &liveness, + .air = air, + .liveness = liveness, .target = &bin_file.options.target, .bin_file = bin_file, .mod_fn = module_fn, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e3f2423746..4743494f35 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6,7 +6,6 @@ const log = std.log.scoped(.c); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const Air = @import("../Air.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; const TypedValue = @import("../TypedValue.zig"); @@ -14,6 +13,8 @@ const C = link.File.C; const Decl = Module.Decl; const trace = 
@import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const Mutability = enum { Const, Mut }; @@ -37,7 +38,7 @@ const BlockData = struct { result: CValue, }; -pub const CValueMap = std.AutoHashMap(*Inst, CValue); +pub const CValueMap = std.AutoHashMap(Air.Inst.Index, CValue); pub const TypedefMap = std.ArrayHashMap( Type, struct { name: []const u8, rendered: []u8 }, @@ -93,6 +94,8 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { /// It is not available when generating .h file. pub const Object = struct { dg: DeclGen, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, @@ -102,7 +105,7 @@ pub const Object = struct { next_block_index: usize = 0, indent_writer: IndentWriter(std.ArrayList(u8).Writer), - fn resolveInst(o: *Object, inst: *Inst) !CValue { + fn resolveInst(o: *Object, inst: Air.Inst.Index) !CValue { if (inst.value()) |_| { return CValue{ .constant = inst }; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 45ee2d9bb8..ddf2883259 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -277,6 +277,9 @@ pub const Object = struct { } pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + var dg: DeclGen = .{ .object = self, .module = module, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 3d704a8dc5..4da320b087 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -159,7 +159,8 @@ pub const DeclGen = struct { /// The SPIR-V module code should be put in. spv: *SPIRVModule, - air: *const Air, + air: Air, + liveness: Liveness, /// An array of function argument result-ids. Each index corresponds with the /// function argument of the same index. 
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 45b00ddfad..912577a358 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -9,13 +9,14 @@ const wasm = std.wasm; const Module = @import("../Module.zig"); const Decl = Module.Decl; -const Air = @import("../Air.zig"); const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const Compilation = @import("../Compilation.zig"); const LazySrcLoc = Module.LazySrcLoc; const link = @import("../link.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -491,6 +492,8 @@ pub const Context = struct { /// Reference to the function declaration the code /// section belongs to decl: *Decl, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, /// Table to save `WValue`'s generated by an `Inst` values: ValueTable, @@ -710,52 +713,53 @@ pub const Context = struct { } } + pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { + try self.genFunctype(); + + // Write instructions + // TODO: check for and handle death of instructions + + // Reserve space to write the size after generating the code as well as space for locals count + try self.code.resize(10); + + try self.genBody(func.body); + + // finally, write our local types at the 'offset' position + { + leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); + + // offset into 'code' section where we will put our locals types + var local_offset: usize = 10; + + // emit the actual locals amount + for (self.locals.items) |local| { + var buf: [6]u8 = undefined; + leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); + buf[5] = local; + try self.code.insertSlice(local_offset, &buf); + local_offset += 6; + } + } + + const writer = self.code.writer(); + try writer.writeByte(wasm.opcode(.end)); + + // Fill in the size of the generated code to the reserved space at the + // beginning of the buffer. 
+ const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; + leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); + + // codegen data has been appended to `code` + return Result.appended; + } + /// Generates the wasm bytecode for the function declaration belonging to `Context` pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result { switch (typed_value.ty.zigTypeTag()) { .Fn => { try self.genFunctype(); - - // Write instructions - // TODO: check for and handle death of instructions - const mod_fn = blk: { - if (typed_value.val.castTag(.function)) |func| break :blk func.data; - if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions - unreachable; - }; - - // Reserve space to write the size after generating the code as well as space for locals count - try self.code.resize(10); - - try self.genBody(mod_fn.body); - - // finally, write our local types at the 'offset' position - { - leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); - - // offset into 'code' section where we will put our locals types - var local_offset: usize = 10; - - // emit the actual locals amount - for (self.locals.items) |local| { - var buf: [6]u8 = undefined; - leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); - buf[5] = local; - try self.code.insertSlice(local_offset, &buf); - local_offset += 6; - } - } - - const writer = self.code.writer(); - try writer.writeByte(wasm.opcode(.end)); - - // Fill in the size of the generated code to the reserved space at the - // beginning of the buffer. - const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; - leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); - - // codegen data has been appended to `code` - return Result.appended; + if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions + return self.fail("TODO implement wasm codegen for function pointers", .{}); }, .Array => { if (typed_value.val.castTag(.bytes)) |payload| { diff --git a/src/link.zig b/src/link.zig index 02d9afaf07..2403180ec8 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const fs = std.fs; @@ -14,8 +15,10 @@ const Cache = @import("Cache.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const wasi_libc = @import("wasi_libc.zig"); +const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); -pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version; +pub const producer_string = if (builtin.is_test) "zig test" else "zig " ++ build_options.version; pub const Emit = struct { /// Where the output will go. 
@@ -313,13 +316,34 @@ pub const File = struct { log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty }); assert(decl.has_tv); switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), - .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), + .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl), - .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), + .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), .spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl), .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl), + // zig fmt: on + } + } + + /// May be called before or after updateDeclExports but must be called + /// after allocateDeclIndexes for any given Decl. + pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + log.debug("updateFunc {*} ({s}), type={}", .{ + func.owner_decl, func.owner_decl.name, func.owner_decl.ty, + }); + switch (base.tag) { + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness), + .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness), + .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness), + .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness), + // zig fmt: on } } diff --git a/src/link/C.zig b/src/link/C.zig index 53561d16cd..09f789f7d1 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -2,14 +2,17 @@ const std = @import("std"); const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const fs = std.fs; + +const C = @This(); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const fs = std.fs; const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; -const C = @This(); const Type = @import("../type.zig").Type; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag: link.File.Tag = .c; pub const zig_h = @embedFile("C/zig.h"); @@ -95,10 +98,7 @@ fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void { decl.fn_link.c.typedefs.deinit(gpa); } -pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - +pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, liveness: Liveness) !void { // Keep track of all decls so we can iterate over them on flush(). 
 _ = try self.decl_table.getOrPut(self.base.allocator, decl);
@@ -126,6 +126,8 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
         .code = code.toManaged(module.gpa),
         .value_map = codegen.CValueMap.init(module.gpa),
         .indent_writer = undefined, // set later so we can get a pointer to object.code
+        .air = air,
+        .liveness = liveness,
     };
     object.indent_writer = .{ .underlying_writer = object.code.writer() };
     defer {
@@ -157,6 +159,20 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
     code.shrinkAndFree(module.gpa, code.items.len);
 }
 
+pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    return self.finishUpdateDecl(module, func.owner_decl, air, liveness);
+}
+
+pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    return self.finishUpdateDecl(module, decl, undefined, undefined);
+}
+
 pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
     // The C backend does not have the ability to fix line numbers without re-generating
     // the entire Decl.
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index b466cf9136..44442b73a3 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1,6 +1,7 @@
 const Coff = @This();
 
 const std = @import("std");
+const builtin = @import("builtin");
 const log = std.log.scoped(.link);
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
@@ -17,6 +18,8 @@ const build_options = @import("build_options");
 const Cache = @import("../Cache.zig");
 const mingw = @import("../mingw.zig");
 const llvm_backend = @import("../codegen/llvm.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
 
 const allocation_padding = 4 / 3;
 const minimum_text_block_size = 64 * allocation_padding;
@@ -653,19 +656,59 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
     }
 }
 
-pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
-    // TODO COFF/PE debug information
-    // TODO Implement exports
+pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+    if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) {
+        @panic("Attempted to compile for object format that was disabled by build configuration");
+    }
+    if (build_options.have_llvm) {
+        if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
+    }
     const tracy = trace(@src());
     defer tracy.end();
 
-    if (build_options.have_llvm)
-        if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
+    const decl = func.owner_decl;
+    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer code_buffer.deinit();
+
+    const res = try codegen.generateFunction(
+        &self.base,
+        decl.srcLoc(),
+        func,
+        air,
+        liveness,
+        &code_buffer,
+        .none,
+    );
+    const code = switch (res) {
+        .externally_managed => |x| x,
+        .appended => code_buffer.items,
+        .fail => |em| {
+            decl.analysis = .codegen_failure;
+            try module.failed_decls.put(module.gpa, decl, em);
+            return;
+        },
+    };
+
+    return self.finishUpdateDecl(module, decl, code);
+}
+
+pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+    if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) {
+        @panic("Attempted to compile for object format that was disabled by build configuration");
+    }
+    if (build_options.have_llvm) {
+        if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
+    }
+    const tracy = trace(@src());
+    defer tracy.end();
 
     if (decl.val.tag() == .extern_fn) {
         return; // TODO Should we do more when front-end analyzed extern decl?
     }
 
+    // TODO COFF/PE debug information
+    // TODO Implement exports
+
     var code_buffer = std.ArrayList(u8).init(self.base.allocator);
     defer code_buffer.deinit();
 
@@ -683,6 +726,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
         },
     };
 
+    return self.finishUpdateDecl(module, decl, code);
+}
+
+fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void {
     const required_alignment = decl.ty.abiAlignment(self.base.options.target);
     const curr_size = decl.link.coff.size;
     if (curr_size != 0) {
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 90224866ba..0d05b97846 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,6 +1,7 @@
 const Elf = @This();
 
 const std = @import("std");
+const builtin = @import("builtin");
 const mem = std.mem;
 const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
@@ -10,7 +11,6 @@ const log = std.log.scoped(.link);
 const DW = std.dwarf;
 const leb128 = std.leb;
 
-const Air = @import("../Air.zig");
 const Module = @import("../Module.zig");
 const Compilation = @import("../Compilation.zig");
 const codegen = @import("../codegen.zig");
@@ -26,6 +26,8 @@ const glibc = @import("../glibc.zig");
 const musl = @import("../musl.zig");
 const Cache = @import("../Cache.zig");
 const llvm_backend = @import("../codegen/llvm.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
 
 const default_entry_addr = 0x8000000;
 
@@ -2155,138 +2157,17 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
     }
 }
 
-pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
-    const tracy = trace(@src());
-    defer tracy.end();
-
-    if (build_options.have_llvm)
-        if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl);
-
-    if (decl.val.tag() == .extern_fn) {
-        return; // TODO Should we do more when front-end analyzed extern decl?
-    }
-    if (decl.val.castTag(.variable)) |payload| {
-        const variable = payload.data;
-        if (variable.is_extern) {
-            return; // TODO Should we do more when front-end analyzed extern decl?
-        }
-    }
-
-    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer code_buffer.deinit();
-
-    var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer dbg_line_buffer.deinit();
-
-    var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer dbg_info_buffer.deinit();
-
-    var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
-    defer {
-        var it = dbg_info_type_relocs.valueIterator();
-        while (it.next()) |value| {
-            value.relocs.deinit(self.base.allocator);
-        }
-        dbg_info_type_relocs.deinit(self.base.allocator);
-    }
-
-    const is_fn: bool = switch (decl.ty.zigTypeTag()) {
-        .Fn => true,
-        else => false,
-    };
-    if (is_fn) {
-        // For functions we need to add a prologue to the debug line program.
-        try dbg_line_buffer.ensureCapacity(26);
-
-        const func = decl.val.castTag(.function).?.data;
-        const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
-
-        const ptr_width_bytes = self.ptrWidthBytes();
-        dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
-            DW.LNS_extended_op,
-            ptr_width_bytes + 1,
-            DW.LNE_set_address,
-        });
-        // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`.
- assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); - dbg_line_buffer.items.len += ptr_width_bytes; - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); - // This is the "relocatable" relative line offset from the previous function's end curly - // to this function's begin curly. - assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); - // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); - assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); - // Once we support more than one source file, this will have the ability to be more - // than one possible value. - const file_index = 1; - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); - - // Emit a line for the begin curly with prologue_end=false. The codegen will - // do the work of setting prologue_end=true and epilogue_begin=true. - dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); - - // .debug_info subprogram - const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; - try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); - - const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); - if (fn_ret_has_bits) { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); - } else { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); - } - // These get overwritten after generating the machine code. These values are - // "relocations" and have to be in this fixed place so that functions can be - // moved in virtual address space. - assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr - assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 - if (fn_ret_has_bits) { - const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); - if (!gop.found_existing) { - gop.value_ptr.* = .{ - .off = undefined, - .relocs = .{}, - }; - } - try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); - dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 - } - dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string - } else { - // TODO implement .debug_info for global variables +fn deinitRelocs(gpa: *Allocator, table: *File.DbgInfoTypeRelocsTable) void { + var it = table.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(gpa); } - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ - .ty = decl.ty, - .val = decl_val, - }, &code_buffer, .{ - .dwarf = .{ - .dbg_line = &dbg_line_buffer, - .dbg_info = &dbg_info_buffer, - .dbg_info_type_relocs = &dbg_info_type_relocs, - }, - }); - const code = switch (res) { - .externally_managed => |x| x, - .appended => code_buffer.items, - .fail => |em| { - decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); - return; - }, - }; + table.deinit(gpa); +} +fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym { const required_alignment = decl.ty.abiAlignment(self.base.options.target); - 
const stt_bits: u8 = if (is_fn) elf.STT_FUNC else elf.STT_OBJECT; - assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes() const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index]; if (local_sym.st_size != 0) { @@ -2338,128 +2219,16 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - const target_endian = self.base.options.target.cpu.arch.endian(); - - const text_block = &decl.link.elf; - - // If the Decl is a function, we need to update the .debug_line program. - if (is_fn) { - // Perform the relocations based on vaddr. - switch (self.ptr_width) { - .p32 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - }, - .p64 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - }, - } - { - const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); - } - - try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); - - // Now we have the full contents and may allocate a region to store it. - - // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for - // `TextBlock` and the .debug_info. If you are editing this logic, you - // probably need to edit that logic too. - - const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; - const src_fn = &decl.fn_link.elf; - src_fn.len = @intCast(u32, dbg_line_buffer.items.len); - if (self.dbg_line_fn_last) |last| not_first: { - if (src_fn.next) |next| { - // Update existing function - non-last item. - if (src_fn.off + src_fn.len + min_nop_size > next.off) { - // It grew too big, so we move it to a new location. - if (src_fn.prev) |prev| { - self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; - prev.next = src_fn.next; - } - assert(src_fn.prev != next); - next.prev = src_fn.prev; - src_fn.next = null; - // Populate where it used to be with NOPs. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); - // TODO Look at the free list before appending at the end. - src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else if (src_fn.prev == null) { - if (src_fn == last) { - // Special case: there is only 1 function and it is being updated. - // In this case there is nothing to do. The function's length has - // already been updated, and the logic below takes care of - // resizing the .debug_line section. - break :not_first; - } - // Append new function. - // TODO Look at the free list before appending at the end. 
- src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else { - // This is the first function of the Line Number Program. - self.dbg_line_fn_first = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); - } - - const last_src_fn = self.dbg_line_fn_last.?; - const needed_size = last_src_fn.off + last_src_fn.len; - if (needed_size != debug_line_sect.sh_size) { - if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { - const new_offset = self.findFreeSpace(needed_size, 1); - const existing_size = last_src_fn.off; - log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ - existing_size, - debug_line_sect.sh_offset, - new_offset, - }); - const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); - if (amt != existing_size) return error.InputOutput; - debug_line_sect.sh_offset = new_offset; - } - debug_line_sect.sh_size = needed_size; - self.shdr_table_dirty = true; // TODO look into making only the one section dirty - self.debug_line_header_dirty = true; - } - const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; - const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; - - // We only have support for one compilation unit so far, so the offsets are directly - // from the .debug_line section. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); - - // .debug_info - End the TAG_subprogram children. - try dbg_info_buffer.append(0); - } + return local_sym; +} +fn finishUpdateDecl( + self: *Elf, + module: *Module, + decl: *Module.Decl, + dbg_info_type_relocs: *File.DbgInfoTypeRelocsTable, + dbg_info_buffer: *std.ArrayList(u8), +) !void { // Now we emit the .debug_info types of the Decl. These will count towards the size of // the buffer, so we have to do it before computing the offset, and we can't perform the actual // relocations yet. @@ -2467,12 +2236,15 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var it = dbg_info_type_relocs.iterator(); while (it.next()) |entry| { entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len); - try self.addDbgInfoType(entry.key_ptr.*, &dbg_info_buffer); + try self.addDbgInfoType(entry.key_ptr.*, dbg_info_buffer); } } + const text_block = &decl.link.elf; try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len)); + const target_endian = self.base.options.target.cpu.arch.endian(); + { // Now that we have the offset assigned we can finally perform type relocations. 
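The iterator loop that follows performs the final pass of a reserve-then-patch scheme: earlier, each use site reserved a 4-byte hole and recorded where it is; now that offsets are assigned, the holes are filled. A freestanding illustration with simplified types (the real table is `File.DbgInfoTypeRelocsTable`):

const std = @import("std");

// Simplified stand-in for one entry of the type-relocation table.
const TypeReloc = struct {
    off: u32,
    relocs: std.ArrayListUnmanaged(u32) = .{},
};

test "two-pass DW.FORM_ref4 relocation" {
    const gpa = std.testing.allocator;
    var dbg_info = std.ArrayList(u8).init(gpa);
    defer dbg_info.deinit();

    var entry = TypeReloc{ .off = undefined };
    defer entry.relocs.deinit(gpa);

    // Pass 0: a use site reserves 4 bytes and records its position.
    try entry.relocs.append(gpa, @intCast(u32, dbg_info.items.len));
    try dbg_info.appendNTimes(0, 4);

    // Pass 1: the type DIE is emitted and its offset becomes known.
    entry.off = @intCast(u32, dbg_info.items.len);
    try dbg_info.append(0x24); // stand-in byte for the type DIE

    // Pass 2: patch every recorded use site with that offset.
    for (entry.relocs.items) |reloc| {
        std.mem.writeInt(u32, dbg_info.items[reloc..][0..4], entry.off, .Little);
    }
    try std.testing.expectEqual(entry.off, std.mem.readInt(u32, dbg_info.items[0..4], .Little));
}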
var it = dbg_info_type_relocs.valueIterator(); @@ -2495,6 +2267,290 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { return self.updateDeclExports(module, decl, decl_exports); } +pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + + const tracy = trace(@src()); + defer tracy.end(); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // For functions we need to add a prologue to the debug line program. + try dbg_line_buffer.ensureCapacity(26); + + const decl = func.owner_decl; + const line_off = @intCast(u28, decl.src_line + func.lbrace_line); + + const ptr_width_bytes = self.ptrWidthBytes(); + dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ + DW.LNS_extended_op, + ptr_width_bytes + 1, + DW.LNE_set_address, + }); + // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`. + assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); + dbg_line_buffer.items.len += ptr_width_bytes; + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); + // This is the "relocatable" relative line offset from the previous function's end curly + // to this function's begin curly. + assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); + // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); + assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); + // Once we support more than one source file, this will have the ability to be more + // than one possible value. + const file_index = 1; + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); + + // Emit a line for the begin curly with prologue_end=false. The codegen will + // do the work of setting prologue_end=true and epilogue_begin=true. + dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); + + // .debug_info subprogram + const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; + try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); + + const fn_ret_type = decl.ty.fnReturnType(); + const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); + if (fn_ret_has_bits) { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); + } else { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); + } + // These get overwritten after generating the machine code. These values are + // "relocations" and have to be in this fixed place so that functions can be + // moved in virtual address space. 
+ assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr + assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 + if (fn_ret_has_bits) { + const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); + if (!gop.found_existing) { + gop.value_ptr.* = .{ + .off = undefined, + .relocs = .{}, + }; + } + try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); + dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 + } + dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string + + const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + const local_sym = try self.updateDeclCode(decl, code, elf.STT_FUNC); + + const target_endian = self.base.options.target.cpu.arch.endian(); + + // Since the Decl is a function, we need to update the .debug_line program. + // Perform the relocations based on vaddr. + switch (self.ptr_width) { + .p32 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + }, + .p64 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + }, + } + { + const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); + } + + try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); + + // Now we have the full contents and may allocate a region to store it. + + // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for + // `TextBlock` and the .debug_info. If you are editing this logic, you + // probably need to edit that logic too. + + const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; + const src_fn = &decl.fn_link.elf; + src_fn.len = @intCast(u32, dbg_line_buffer.items.len); + if (self.dbg_line_fn_last) |last| not_first: { + if (src_fn.next) |next| { + // Update existing function - non-last item. + if (src_fn.off + src_fn.len + min_nop_size > next.off) { + // It grew too big, so we move it to a new location. + if (src_fn.prev) |prev| { + self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; + prev.next = src_fn.next; + } + assert(src_fn.prev != next); + next.prev = src_fn.prev; + src_fn.next = null; + // Populate where it used to be with NOPs. 
+ const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else if (src_fn.prev == null) { + if (src_fn == last) { + // Special case: there is only 1 function and it is being updated. + // In this case there is nothing to do. The function's length has + // already been updated, and the logic below takes care of + // resizing the .debug_line section. + break :not_first; + } + // Append new function. + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else { + // This is the first function of the Line Number Program. + self.dbg_line_fn_first = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); + } + + const last_src_fn = self.dbg_line_fn_last.?; + const needed_size = last_src_fn.off + last_src_fn.len; + if (needed_size != debug_line_sect.sh_size) { + if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { + const new_offset = self.findFreeSpace(needed_size, 1); + const existing_size = last_src_fn.off; + log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ + existing_size, + debug_line_sect.sh_offset, + new_offset, + }); + const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); + if (amt != existing_size) return error.InputOutput; + debug_line_sect.sh_offset = new_offset; + } + debug_line_sect.sh_size = needed_size; + self.shdr_table_dirty = true; // TODO look into making only the one section dirty + self.debug_line_header_dirty = true; + } + const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; + const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; + + // We only have support for one compilation unit so far, so the offsets are directly + // from the .debug_line section. + const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); + + // .debug_info - End the TAG_subprogram children. + try dbg_info_buffer.append(0); + + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + +pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + + const tracy = trace(@src()); + defer tracy.end(); + + if (decl.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + if (decl.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.is_extern) { + return; // TODO Should we do more when front-end analyzed extern decl? 
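A note on the `padToIdeal` calls threaded through the allocation logic above: each function's .debug_line slice is over-allocated so that a regenerated body can usually grow in place instead of being moved and NOP-filled. A hypothetical sketch of such a headroom policy (the real `padToIdeal` may use a different factor):

const std = @import("std");

// Hypothetical headroom policy: roughly one third extra space per block,
// so a slightly larger regenerated function still fits where it lives.
fn padToIdealSketch(actual_size: u64) u64 {
    return actual_size + actual_size / 3;
}

test "padded blocks leave room to grow" {
    try std.testing.expect(padToIdealSketch(120) == 160);
}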
+        }
+    }
+
+    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer code_buffer.deinit();
+
+    var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer dbg_line_buffer.deinit();
+
+    var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer dbg_info_buffer.deinit();
+
+    var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
+    defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs);
+
+    // TODO implement .debug_info for global variables
+    const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
+    const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+        .ty = decl.ty,
+        .val = decl_val,
+    }, &code_buffer, .{
+        .dwarf = .{
+            .dbg_line = &dbg_line_buffer,
+            .dbg_info = &dbg_info_buffer,
+            .dbg_info_type_relocs = &dbg_info_type_relocs,
+        },
+    });
+    const code = switch (res) {
+        .externally_managed => |x| x,
+        .appended => code_buffer.items,
+        .fail => |em| {
+            decl.analysis = .codegen_failure;
+            try module.failed_decls.put(module.gpa, decl, em);
+            return;
+        },
+    };
+
+    _ = try self.updateDeclCode(decl, code, elf.STT_OBJECT);
+    return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer);
+}
+
 /// Asserts the type has codegen bits.
 fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !void {
     switch (ty.zigTypeTag()) {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index df2e0134e4..cd020c1b27 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -1,6 +1,7 @@
 const MachO = @This();
 
 const std = @import("std");
+const builtin = @import("builtin");
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
 const fmt = std.fmt;
@@ -22,6 +23,8 @@ const link = @import("../link.zig");
 const File = link.File;
 const Cache = @import("../Cache.zig");
 const target_util = @import("../target.zig");
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
 
 const DebugSymbols = @import("MachO/DebugSymbols.zig");
 const Trie = @import("MachO/Trie.zig");
@@ -1132,7 +1135,55 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
     };
 }
 
+pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+    if (build_options.skip_non_native and builtin.object_format != .macho) {
+        @panic("Attempted to compile for object format that was disabled by build configuration");
+    }
+    if (build_options.have_llvm) {
+        if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
+    }
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    const decl = func.owner_decl;
+
+    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer code_buffer.deinit();
+
+    var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null;
+    defer {
+        if (debug_buffers) |*dbg| {
+            dbg.dbg_line_buffer.deinit();
+            dbg.dbg_info_buffer.deinit();
+            var it = dbg.dbg_info_type_relocs.valueIterator();
+            while (it.next()) |value| {
+                value.relocs.deinit(self.base.allocator);
+            }
+            dbg.dbg_info_type_relocs.deinit(self.base.allocator);
+        }
+    }
+
+    const res = if (debug_buffers) |*dbg|
+        try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
+            .dwarf = .{
+                .dbg_line = &dbg.dbg_line_buffer,
+                .dbg_info = &dbg.dbg_info_buffer,
+                .dbg_info_type_relocs = &dbg.dbg_info_type_relocs,
+            },
+        })
+    else
+        try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
+
+    return self.finishUpdateDecl(module, decl, res);
+}
+
 pub fn updateDecl(self:
*MachO, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } const tracy = trace(@src()); defer tracy.end(); @@ -1173,6 +1224,10 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { .val = decl.val, }, &code_buffer, .none); + return self.finishUpdateDecl(module, decl, res); +} + +fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void { const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 80a92f9cdb..bc044ce414 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -2,18 +2,21 @@ //! would be to add incremental linking in a similar way as ELF does. const Plan9 = @This(); - -const std = @import("std"); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const aout = @import("Plan9/aout.zig"); const codegen = @import("../codegen.zig"); const trace = @import("../tracy.zig").trace; -const mem = std.mem; const File = link.File; -const Allocator = std.mem.Allocator; +const build_options = @import("build_options"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); +const std = @import("std"); +const builtin = @import("builtin"); +const mem = std.mem; +const Allocator = std.mem.Allocator; const log = std.log.scoped(.link); const assert = std.debug.assert; @@ -120,6 +123,19 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 { return self; } +pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). 
+ _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO Plan9 needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { _ = module; _ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -138,6 +154,9 @@ pub fn flush(self: *Plan9, comp: *Compilation) !void { } pub fn flushModule(self: *Plan9, comp: *Compilation) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } _ = comp; const tracy = trace(@src()); defer tracy.end(); @@ -199,7 +218,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { } } if (std.mem.eql(u8, exp.options.name, "_start")) { - std.debug.assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry + assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry self.entry_decl = decl; } if (exp.link.plan9) |i| { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 8a2e877d42..bc9e560582 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -36,6 +36,8 @@ const ResultId = codegen.ResultId; const trace = @import("../tracy.zig").trace; const build_options = @import("build_options"); const spec = @import("../codegen/spirv/spec.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); // TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl? pub const FnData = struct { @@ -101,7 +103,23 @@ pub fn deinit(self: *SpirV) void { self.decl_table.deinit(self.base.allocator); } +pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). + _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO SPIR-V needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } _ = module; // Keep track of all decls so we can iterate over them on flush(). 
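The `_ = module;` discards in these stubs (and in the Plan9 one above) exist because the compiler's unused-value checks would otherwise reject the untouched parameters. The pattern in isolation, with plain integers standing in for the real types:

fn updateFuncStub(module: usize, air: usize) void {
    // Discard parameters explicitly until the stub grows a real body;
    // leaving them unreferenced is a compile error.
    _ = module;
    _ = air;
}

test "stub compiles with every parameter discarded" {
    updateFuncStub(0, 0);
}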
_ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -132,13 +150,13 @@ pub fn flush(self: *SpirV, comp: *Compilation) !void { } pub fn flushModule(self: *SpirV, comp: *Compilation) !void { - const tracy = trace(@src()); - defer tracy.end(); - if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const tracy = trace(@src()); + defer tracy.end(); + const module = self.base.options.module.?; const target = comp.getTarget(); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 15a36a4bcc..be6ad78701 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1,6 +1,7 @@ const Wasm = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -18,6 +19,8 @@ const build_options = @import("build_options"); const wasi_libc = @import("../wasi_libc.zig"); const Cache = @import("../Cache.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag = link.File.Tag.wasm; @@ -186,11 +189,60 @@ pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void { } } +pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const decl = func.owner_decl; + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + + const fn_data = &decl.fn_link.wasm; + fn_data.functype.items.len = 0; + fn_data.code.items.len = 0; + fn_data.idx_refs.items.len = 0; + + var context = codegen.Context{ + .gpa = self.base.allocator, + .air = air, + .liveness = liveness, + .values = .{}, + .code = fn_data.code.toManaged(self.base.allocator), + .func_type_data = fn_data.functype.toManaged(self.base.allocator), + .decl = decl, + .err_msg = undefined, + .locals = .{}, + .target = self.base.options.target, + .global_error_set = self.base.options.module.?.global_error_set, + }; + defer context.deinit(); + + // generate the 'code' section for the function declaration + const result = context.genFunc(func) catch |err| switch (err) { + error.CodegenFail => { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, context.err_msg); + return; + }, + else => |e| return e, + }; + return self.finishUpdateDecl(decl, result); +} + // Generate code for the Decl, storing it in memory to be later written to // the file on flush(). 
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { - std.debug.assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + // TODO don't use this for non-functions const fn_data = &decl.fn_link.wasm; fn_data.functype.items.len = 0; fn_data.code.items.len = 0; @@ -218,7 +270,10 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { }, else => |e| return e, }; + return self.finishUpdateDecl(decl, result); +} +fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result) !void { const code: []const u8 = switch (result) { .appended => @as([]const u8, context.code.items), .externally_managed => |payload| payload, @@ -521,7 +576,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void { var data_offset = offset_table_size; while (cur) |cur_block| : (cur = cur_block.next) { if (cur_block.size == 0) continue; - std.debug.assert(cur_block.init); + assert(cur_block.init); const offset = (cur_block.offset_index) * ptr_width; var buf: [4]u8 = undefined; -- cgit v1.2.3 From c09b973ec25f328f5e15e9e6eed4da7f5e4634af Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jul 2021 15:45:08 -0700 Subject: stage2: compile error fixes for AIR memory layout branch Now the branch is compiling again, provided that one uses `-Dskip-non-native`, but many code paths are disabled. The code paths can now be re-enabled one at a time and updated to conform to the new AIR memory layout. --- src/Air.zig | 30 +- src/Compilation.zig | 2 +- src/Liveness.zig | 71 ++-- src/Module.zig | 34 +- src/Sema.zig | 986 +++++++++++++++++++++++++++++----------------------- src/codegen.zig | 159 +++++---- src/codegen/c.zig | 204 +++++------ src/link/Elf.zig | 3 + src/value.zig | 2 +- 9 files changed, 851 insertions(+), 640 deletions(-) (limited to 'src/Liveness.zig') diff --git a/src/Air.zig b/src/Air.zig index e85f2e5c43..1f294c43f3 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -332,12 +332,12 @@ pub const Block = struct { body_len: u32, }; -/// Trailing is a list of `Ref` for every `args_len`. +/// Trailing is a list of `Inst.Ref` for every `args_len`. pub const Call = struct { args_len: u32, }; -/// This data is stored inside extra, with two sets of trailing `Ref`: +/// This data is stored inside extra, with two sets of trailing `Inst.Ref`: /// * 0. the then body, according to `then_body_len`. /// * 1. the else body, according to `else_body_len`. pub const CondBr = struct { @@ -355,19 +355,19 @@ pub const SwitchBr = struct { /// Trailing: /// * instruction index for each `body_len`. pub const Case = struct { - item: Ref, + item: Inst.Ref, body_len: u32, }; }; pub const StructField = struct { - struct_ptr: Ref, + struct_ptr: Inst.Ref, field_index: u32, }; /// Trailing: -/// 0. `Ref` for every outputs_len -/// 1. `Ref` for every inputs_len +/// 0. `Inst.Ref` for every outputs_len +/// 1. `Inst.Ref` for every inputs_len pub const Asm = struct { /// Index to the corresponding ZIR instruction. 
/// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and @@ -381,6 +381,24 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[body_index..][0..body_len]; } +pub fn getType(air: Air, inst: Air.Inst.Index) Type { + _ = air; + _ = inst; + @panic("TODO Air getType"); +} + +pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = air.instructions.items(.tag); + const air_datas = air.instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + /// Returns the requested data, as well as the new index which is at the start of the /// trailers for the object. pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } { diff --git a/src/Compilation.zig b/src/Compilation.zig index 90224a77d1..4a442a8b67 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2023,7 +2023,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer air.deinit(gpa); log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { diff --git a/src/Liveness.zig b/src/Liveness.zig index 1402a5997b..838f19d4a1 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -7,11 +7,13 @@ //! * Switch Branches const Liveness = @This(); const std = @import("std"); -const Air = @import("Air.zig"); const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const Air = @import("Air.zig"); +const Zir = @import("Zir.zig"); +const Log2Int = std.math.Log2Int; /// This array is split into sets of 4 bits per AIR instruction. /// The MSB (0bX000) is whether the instruction is unreferenced. 
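On the `getRefType` addition above: an `Inst.Ref` below `typed_value_map.len` names an interned constant, and anything at or past that offset is an instruction index shifted by the table length. A toy model of the mapping, with a made-up table size (the real table and types live in Air.zig):

const std = @import("std");

// Toy model: pretend the interned-constant table has 8 entries.
const typed_value_map_len: u32 = 8;

fn indexToRef(inst: u32) u32 {
    return inst + typed_value_map_len;
}

fn refToIndex(ref: u32) ?u32 {
    return if (ref >= typed_value_map_len) ref - typed_value_map_len else null;
}

test "refs below the table length are constants, not instructions" {
    try std.testing.expectEqual(@as(?u32, null), refToIndex(3));
    try std.testing.expectEqual(@as(?u32, 0), refToIndex(indexToRef(0)));
}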
@@ -44,7 +46,7 @@ pub const SwitchBr = struct { else_death_count: u32, }; -pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -58,6 +60,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .zir = &zir, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -74,23 +77,32 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { }; } +pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi { + const usize_index = (inst * bpi) / @bitSizeOf(usize); + return @truncate(Bpi, l.tomb_bits[usize_index] >> + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi)); +} + pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); l.tomb_bits[usize_index] |= mask; } @@ -113,10 +125,12 @@ const Analysis = struct { tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), + zir: *const Zir, fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { const usize_index = (inst * bpi) / @bitSizeOf(usize); - a.tomb_bits[usize_index] |= tomb_bits << (inst % (@bitSizeOf(usize) / bpi)) * bpi; + a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); } fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { @@ -203,9 +217,11 @@ fn analyzeInst( return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none }); }, + .arg, .alloc, .br, .constant, + .const_ty, .breakpoint, .dbg_stmt, .varptr, @@ -255,15 +271,30 @@ fn analyzeInst( if (args.len <= bpi - 2) { var buf: [bpi - 1]Air.Inst.Ref = undefined; buf[0] = callee; - std.mem.copy(&buf, buf[1..], args); + std.mem.copy(Air.Inst.Ref, buf[1..], @bitCast([]const Air.Inst.Ref, args)); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function with many args"); + @panic("TODO: liveness analysis for function with greater than 2 args"); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none }); }, + .assembly => { + const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); + const extended = 
a.zir.instructions.items(.data)[extra.data.zir_index].extended; + const outputs_len = @truncate(u5, extended.small); + const inputs_len = @truncate(u5, extended.small >> 5); + const outputs = a.air.extra[extra.end..][0..outputs_len]; + const inputs = a.air.extra[extra.end + outputs.len ..][0..inputs_len]; + if (outputs.len + inputs.len <= bpi - 1) { + var buf: [bpi - 1]Air.Inst.Ref = undefined; + std.mem.copy(Air.Inst.Ref, &buf, @bitCast([]const Air.Inst.Ref, outputs)); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], @bitCast([]const Air.Inst.Ref, inputs)); + return trackOperands(a, new_set, inst, main_tomb, buf); + } + @panic("TODO: liveness analysis for asm with greater than 3 args"); + }, .block => { const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; @@ -287,8 +318,8 @@ fn analyzeInst( const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - var then_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer then_table.deinit(); + var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer then_table.deinit(gpa); try analyzeWithContext(a, &then_table, then_body); // Reset the table back to its state from before the branch. @@ -299,8 +330,8 @@ fn analyzeInst( } } - var else_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer else_table.deinit(); + var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer else_table.deinit(gpa); try analyzeWithContext(a, &else_table, else_body); var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa); @@ -331,7 +362,7 @@ fn analyzeInst( } // Now we have to correctly populate new_set. if (new_set) |ns| { - try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count())); + try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count())); var it = then_table.keyIterator(); while (it.next()) |key| { _ = ns.putAssumeCapacity(key.*, {}); @@ -344,7 +375,7 @@ fn analyzeInst( const then_death_count = @intCast(u32, then_entry_deaths.items.len); const else_death_count = @intCast(u32, else_entry_deaths.items.len); - try a.extra.ensureUnusedCapacity(std.meta.fields(@TypeOf(CondBr)).len + + try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(Air.CondBr).len + then_death_count + else_death_count); const extra_index = a.addExtraAssumeCapacity(CondBr{ .then_death_count = then_death_count, @@ -352,7 +383,7 @@ fn analyzeInst( }); a.extra.appendSliceAssumeCapacity(then_entry_deaths.items); a.extra.appendSliceAssumeCapacity(else_entry_deaths.items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); // Continue on with the instruction analysis. The following code will find the condition // instruction, and the deaths flag for the CondBr instruction will indicate whether the @@ -438,12 +469,12 @@ fn analyzeInst( }); for (case_deaths[0 .. 
case_deaths.len - 1]) |*cd| { const case_death_count = @intCast(u32, cd.items.len); - try a.extra.ensureUnusedCapacity(1 + case_death_count + else_death_count); + try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count); a.extra.appendAssumeCapacity(case_death_count); a.extra.appendSliceAssumeCapacity(cd.items); } a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); }, @@ -452,7 +483,7 @@ fn analyzeInst( fn trackOperands( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, main_tomb: bool, operands: [bpi - 1]Air.Inst.Ref, @@ -468,12 +499,12 @@ fn trackOperands( tomb_bits <<= 1; const op_int = @enumToInt(operands[i]); if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const operand: Air.Inst.Index = op_int - Air.Inst.Ref.typed_value_map.len; + const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); const prev = try table.fetchPut(gpa, operand, {}); if (prev == null) { // Death. tomb_bits |= 1; - if (new_set) |ns| try ns.putNoClobber(operand, {}); + if (new_set) |ns| try ns.putNoClobber(gpa, operand, {}); } } a.storeTombBits(inst, tomb_bits); diff --git a/src/Module.zig b/src/Module.zig index 5972c2bdcf..7ec9c7e93d 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1225,6 +1225,30 @@ pub const Scope = struct { pub fn getFileScope(block: *Block) *Scope.File { return block.src_decl.namespace.file_scope; } + + pub fn addTyOp( + block: *Block, + tag: Air.Inst.Tag, + ty: Type, + operand: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + const sema = block.sema; + const gpa = sema.gpa; + + try sema.air_instructions.ensureUnusedCapacity(gpa, 1); + try block.instructions.ensureUnusedCapacity(gpa, 1); + + const inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .ty_op = .{ + .ty = try sema.addType(ty), + .operand = operand, + } }, + }); + block.instructions.appendAssumeCapacity(inst); + return Sema.indexToRef(inst); + } }; }; @@ -3408,7 +3432,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen()); + const param_inst_list = try gpa.alloc(Air.Inst.Ref, fn_ty.fnParamLen()); defer gpa.free(param_inst_list); var sema: Sema = .{ @@ -3440,10 +3464,13 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer inner_block.instructions.deinit(gpa); // AIR requires the arg parameters to be the first N instructions. 
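Back in Liveness.zig, the `Log2Int(usize)` casts patched in above implement the 4-bits-per-instruction packing described at the top of the file. A worked example of the index math, assuming a 64-bit `usize`:

const std = @import("std");

test "tomb bit packing, assuming 64-bit usize" {
    const bpi = 4; // tomb bits per AIR instruction
    const inst: usize = 17;
    // Instruction 17 owns bits 68..72 overall, i.e. bits 4..8 of word 1.
    const usize_index = (inst * bpi) / 64;
    const bit_offset = (inst % (64 / bpi)) * bpi;
    try std.testing.expectEqual(@as(usize, 1), usize_index);
    try std.testing.expectEqual(@as(usize, 4), bit_offset);
    // Operand 0 death lives at bit_offset + 0; the "unreferenced" flag
    // at bit_offset + 3, matching isUnused/operandDies above.
}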
+ try inner_block.instructions.ensureTotalCapacity(gpa, param_inst_list.len); for (param_inst_list) |*param_inst, param_index| { const param_type = fn_ty.fnParamType(param_index); const ty_ref = try sema.addType(param_type); - param_inst.* = @intCast(u32, sema.air_instructions.len); + const arg_index = @intCast(u32, sema.air_instructions.len); + inner_block.instructions.appendAssumeCapacity(arg_index); + param_inst.* = Sema.indexToRef(arg_index); try sema.air_instructions.append(gpa, .{ .tag = .arg, .data = .{ @@ -3454,7 +3481,6 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { }, }); } - try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); @@ -4043,13 +4069,11 @@ pub fn floatMul( } pub fn simplePtrType( - mod: *Module, arena: *Allocator, elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size, ) Allocator.Error!Type { - _ = mod; if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) { return Type.initTag(.const_slice_u8); } diff --git a/src/Sema.zig b/src/Sema.zig index 54c42a482d..fc130cd4a4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36,7 +36,7 @@ func: ?*Module.Fn, /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function, /// > otherwise it is the number of parameters of the function. /// > param_count: u32 -param_inst_list: []const Air.Inst.Index, +param_inst_list: []const Air.Inst.Ref, branch_quota: u32 = 1000, branch_count: u32 = 0, /// This field is updated when a new source location becomes active, so that @@ -59,8 +59,6 @@ const TypedValue = @import("TypedValue.zig"); const Air = @import("Air.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); -const Inst = ir.Inst; -const Body = ir.Body; const trace = @import("tracy.zig").trace; const Scope = Module.Scope; const InnerError = Module.InnerError; @@ -117,7 +115,7 @@ pub fn analyzeFnBody( /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. 
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -513,7 +511,7 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } @@ -529,12 +527,12 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } //}, - else => @panic("TODO remove else prong"), + else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), }; if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; @@ -543,7 +541,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -598,7 +596,7 @@ fn resolveConstBool( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) !bool { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.bool); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -611,7 +609,7 @@ fn resolveConstString( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) ![]u8 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.const_slice_u8); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -619,24 +617,39 @@ fn resolveConstString( } pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); return sema.resolveAirAsType(block, src, air_inst); } -fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type { +fn resolveAirAsType( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_inst: Air.Inst.Ref, +) !Type { const wanted_type = Type.initTag(.@"type"); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toType(sema.arena); } -fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !Value { - return (try sema.resolveDefinedValue(block, src, base)) orelse +fn resolveConstValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !Value { + return (try sema.resolveDefinedValue(block, src, air_ref)) orelse return sema.failWithNeededComptime(block, src); } 
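One mechanical change running through this whole Sema hunk: `try sema.resolveInst(...)` becomes `sema.resolveInst(...)`, since mapping a `Zir.Inst.Ref` to an `Air.Inst.Ref` is now an infallible table/offset lookup. A toy model of the constant half of that lookup, mirroring `resolvePossiblyUndefinedValue` below (the real table holds `TypedValue`s, not integers):

const std = @import("std");

// Toy stand-in for Air.Inst.Ref.typed_value_map: refs below the table
// length resolve to interned constants without touching instructions.
const toy_constants = [_]u64{ 0, 1, 42 };

fn constValue(ref: u32) ?u64 {
    if (ref < toy_constants.len) return toy_constants[ref];
    return null; // past the table: would need the instruction's tag/data
}

test "interned refs resolve immediately" {
    try std.testing.expectEqual(@as(?u64, 42), constValue(2));
    try std.testing.expectEqual(@as(?u64, null), constValue(3));
}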
-fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value { - if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| { +fn resolveDefinedValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !?Value { + if (try sema.resolvePossiblyUndefinedValue(block, src, air_ref)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); } @@ -649,13 +662,29 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - base: Air.Inst.Index, + air_ref: Air.Inst.Ref, ) !?Value { - if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| { + const ty = sema.getTypeOfAirRef(air_ref); + if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; } - const inst = base.castTag(.constant) orelse return null; - return inst.val; + // The first section of indexes corresponds to a fixed set of constant values. + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val; + } + i -= Air.Inst.Ref.typed_value_map.len; + + switch (sema.air_instructions.items(.tag)[i]) { + .constant => { + const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; + return sema.air_values.items[ty_pl.payload]; + }, + .const_ty => { + return sema.air_instructions.items(.data)[i].ty.toValue(undefined) catch unreachable; + }, + else => return null, + } } fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { @@ -677,7 +706,7 @@ fn resolveAlreadyCoercedInt( comptime Int: type, ) !Int { comptime assert(@typeInfo(Int).Int.bits <= 64); - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_inst); switch (@typeInfo(Int).Int.signedness) { .signed => return @intCast(Int, val.toSignedInt()), @@ -692,7 +721,7 @@ fn resolveInt( zir_ref: Zir.Inst.Ref, dest_type: Type, ) !u64 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, dest_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced); @@ -705,21 +734,21 @@ pub fn resolveInstConst( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) InnerError!TypedValue { - const air_inst = try sema.resolveInst(zir_ref); - const val = try sema.resolveConstValue(block, src, air_inst); + const air_ref = sema.resolveInst(zir_ref); + const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ - .ty = air_inst.ty, + .ty = sema.getTypeOfAirRef(air_ref), .val = val, }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -754,7 +783,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const small =
@bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -825,7 +854,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1022,7 +1051,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1086,7 +1115,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1106,7 +1135,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1146,7 +1175,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1154,16 +1183,16 @@ fn zirRetPtr( try sema.requireFunctionBlock(block, src); const fn_ty = sema.func.?.owner_decl.ty; const ret_type = fn_ty.fnReturnType(); - const ptr_type = try sema.mod.simplePtrType(sema.arena, ret_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, ret_type, true, .One); return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeRef(block, inst_data.src(), operand); } @@ -1171,7 +1200,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1187,7 +1216,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.ensureResultUsed(block, operand, src); @@ -1196,7 +1225,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I fn ensureResultUsed( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, src: LazySrcLoc, ) InnerError!void { switch (operand.ty.zigTypeTag()) { @@ -1210,7 +1239,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); switch (operand.ty.zigTypeTag()) { .ErrorSet, .ErrorUnion => return sema.mod.fail(&block.base, src, "error is discarded", .{}), @@ -1218,13 +1247,13 @@ fn zirEnsureResultNonError(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const array_ptr = try sema.resolveInst(inst_data.operand); + const array_ptr = sema.resolveInst(inst_data.operand); const elem_ty = array_ptr.ty.elemType(); if (!elem_ty.isIndexable()) { @@ -1267,7 +1296,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[refToIndex(air_arg).?].ty_str.str = inst_data.start; return air_arg; } @@ -1275,13 +1304,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1289,7 +1318,7 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); const val_payload = try sema.arena.create(Value.Payload.ComptimeAlloc); val_payload.* = .{ @@ -1304,13 +1333,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1318,12 +1347,12 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_decl_src = inst_data.src(); const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn 
zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1332,7 +1361,7 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); try sema.validateVarType(block, ty_src, var_type); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } @@ -1342,7 +1371,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1372,7 +1401,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const ptr_val = ptr.castTag(.constant).?.val; const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; const peer_inst_list = inferred_alloc.data.stored_inst_list.items; @@ -1385,7 +1414,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde if (var_is_mut) { try sema.validateVarType(block, ty_src, final_elem_ty); } - const final_ptr_ty = try sema.mod.simplePtrType(sema.arena, final_elem_ty, true, .One); + const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One); // Change it to a normal alloc. ptr.ty = final_ptr_ty; @@ -1406,7 +1435,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind const struct_obj: *Module.Struct = s: { const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); + const object_ptr = sema.resolveInst(field_ptr_extra.lhs); break :s object_ptr.ty.elemType().castTag(.@"struct").?.data; }; @@ -1535,9 +1564,9 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In // to omit it. return; } - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); - const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); // TODO detect when this store should be done at compile-time. For example, // if expressions should force it when the condition is compile-time known. 
const src: LazySrcLoc = .unneeded; @@ -1552,14 +1581,14 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const src: LazySrcLoc = .unneeded; const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); const inferred_alloc = ptr.castTag(.constant).?.val.castTag(.inferred_alloc).?; // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, value); // Create a runtime bitcast instruction with exactly the type the pointer wants. - const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); try sema.requireRuntimeBlock(block, src); const bitcasted_ptr = try block.addUnOp(src, ptr_ty, .bitcast, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); @@ -1578,8 +1607,8 @@ fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!v defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); return sema.storePtr(block, sema.src, ptr, value); } @@ -1590,18 +1619,18 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const ptr = try sema.resolveInst(extra.lhs); - const value = try sema.resolveInst(extra.rhs); + const ptr = sema.resolveInst(extra.lhs); + const value = sema.resolveInst(extra.rhs); return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const inst_data = sema.code.instructions.items(.data)[inst].param_type; - const fn_inst = try sema.resolveInst(inst_data.callee); + const fn_inst = sema.resolveInst(inst_data.callee); const param_index = inst_data.param_index; const fn_ty: Type = switch (fn_inst.ty.zigTypeTag()) { @@ -1631,7 +1660,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, src, param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1659,7 +1688,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1668,7 +1697,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return 
sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1686,7 +1715,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! }); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1699,7 +1728,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1728,7 +1757,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1741,7 +1770,7 @@ fn zirCompileLog( for (args) |arg_ref, i| { if (i != 0) try writer.print(", ", .{}); - const arg = try sema.resolveInst(arg_ref); + const arg = sema.resolveInst(arg_ref); if (try sema.resolvePossiblyUndefinedValue(block, src, arg)) |val| { try writer.print("@as({}, {})", .{ arg.ty, val }); } else { @@ -1773,12 +1802,12 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); - const msg_inst = try sema.resolveInst(inst_data.operand); + const msg_inst = sema.resolveInst(inst_data.operand); return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1843,7 +1872,7 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1853,13 +1882,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1917,7 +1946,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1928,7 +1957,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2088,7 +2117,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerE const inst_data = sema.code.instructions.items(.data)[inst].@"break"; const src = sema.src; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const zir_block = inst_data.block_inst; var block = start_block; @@ -2136,7 +2165,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2144,7 +2173,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2198,7 +2227,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2208,12 +2237,12 @@ fn zirCall( const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.args_len); - const func = try sema.resolveInst(extra.data.callee); + const func = sema.resolveInst(extra.data.callee); // TODO handle function calls of generic functions - const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len); + const resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len); for (args) |zir_arg, i| { // the args are already casted to the result of a param type instruction. - resolved_args[i] = try sema.resolveInst(zir_arg); + resolved_args[i] = sema.resolveInst(zir_arg); } return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args); @@ -2222,13 +2251,13 @@ fn zirCall( fn analyzeCall( sema: *Sema, block: *Scope.Block, - func: Air.Inst.Index, + func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - args: []const Air.Inst.Index, -) InnerError!Air.Inst.Index { + args: []const Air.Inst.Ref, +) InnerError!Air.Inst.Ref { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2285,7 +2314,7 @@ fn analyzeCall( const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or func.ty.fnCallingConvention() == .Inline; - const result: Air.Inst.Index = if (is_inline_call) res: { + const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, @@ -2383,7 +2412,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2395,7 +2424,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2407,7 +2436,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2415,7 +2444,7 
@@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.constType(sema.arena, src, elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -2430,7 +2459,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2443,7 +2472,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2458,7 +2487,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2471,7 +2500,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2492,7 +2521,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.constType(sema.arena, src, err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2511,14 +2540,14 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); const op_coerced = try sema.coerce(block, Type.initTag(.anyerror), op, operand_src); const result_ty = Type.initTag(.u16); @@ -2541,7 +2570,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, result_ty, 
.bitcast, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2549,7 +2578,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); if (try sema.resolveDefinedValue(block, operand_src, op)) |value| { const int = value.toUnsignedInt(); @@ -2574,7 +2603,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2583,8 +2612,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); if (rhs.ty.zigTypeTag() == .Bool and lhs.ty.zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.mod.errMsg(&block.base, lhs_src, "expected error set type, found 'bool'", .{}); @@ -2664,7 +2693,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2678,15 +2707,15 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); - const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand.ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand.ty.unionHasTag()) { @@ -2760,7 +2789,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2770,7 +2799,7 @@ fn zirIntToEnum(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag() != .Enum) { return mod.fail(&block.base, dest_ty_src, "expected enum, found {}", .{dest_ty}); @@ -2821,12 +2850,12 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const optional_ptr = try sema.resolveInst(inst_data.operand); + const optional_ptr = sema.resolveInst(inst_data.operand); assert(optional_ptr.ty.zigTypeTag() == .Pointer); const src = inst_data.src(); @@ -2836,7 +2865,7 @@ fn zirOptionalPayloadPtr( } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try sema.mod.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); + const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); if (optional_ptr.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2864,13 +2893,13 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const opt_type = operand.ty; if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); @@ -2902,13 +2931,13 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); @@ -2936,19 +2965,19 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); - const operand_pointer_ty = try sema.mod.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); + const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, 
!operand.ty.isConstPtr(), .One); if (operand.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2975,13 +3004,13 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); @@ -3001,13 +3030,13 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) @@ -3035,7 +3064,7 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); if (operand.ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { @@ -3048,7 +3077,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3099,7 +3128,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3240,7 +3269,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3248,7 +3277,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air. 
return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3264,18 +3293,18 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const dest_type = try sema.resolveType(block, src, zir_dest_type); - const operand = try sema.resolveInst(zir_operand); + const operand = sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); if (ptr.ty.zigTypeTag() != .Pointer) { const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}); @@ -3287,7 +3316,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, ty, .ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3296,7 +3325,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const object_ptr = if (object.ty.zigTypeTag() == .Pointer) object else @@ -3305,7 +3334,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3314,11 +3343,11 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object_ptr = try sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3326,14 +3355,14 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); const object_ptr = try sema.analyzeRef(block, src, object); const result_ptr = try sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3341,12 +3370,12 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object_ptr = try sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3357,7 +3386,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_int = switch (dest_type.zigTypeTag()) { .ComptimeInt => true, @@ -3389,20 +3418,21 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); - return sema.bitcast(block, dest_type, operand); + const operand = sema.resolveInst(extra.rhs); + return sema.bitcast(block, dest_type, operand, operand_src); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3413,7 +3443,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const extra = 
sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_float = switch (dest_type.zigTypeTag()) { .ComptimeFloat => true, @@ -3445,22 +3475,22 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array = try sema.resolveInst(bin_inst.lhs); + const array = sema.resolveInst(bin_inst.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, sema.src, array); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const elem_index = sema.resolveInst(bin_inst.rhs); const result_ptr = try sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3468,27 +3498,27 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array = try sema.resolveInst(extra.lhs); + const array = sema.resolveInst(extra.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, src, array); - const elem_index = try sema.resolveInst(extra.rhs); + const elem_index = sema.resolveInst(extra.rhs); const result_ptr = try sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array_ptr = try sema.resolveInst(bin_inst.lhs); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const array_ptr = sema.resolveInst(bin_inst.lhs); + const elem_index = sema.resolveInst(bin_inst.rhs); return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3496,39 +3526,39 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const elem_index = try 
sema.resolveInst(extra.rhs); + const array_ptr = sema.resolveInst(extra.lhs); + const elem_index = sema.resolveInst(extra.rhs); return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3536,10 +3566,10 @@ fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); - const sentinel = try sema.resolveInst(extra.sentinel); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); + const sentinel = sema.resolveInst(extra.sentinel); return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src); } @@ -3550,7 +3580,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3569,7 +3599,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3588,7 +3618,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3597,7 +3627,7 @@ fn zirSwitchBlock( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = 
sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); - const operand_ptr = try sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3621,7 +3651,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3630,7 +3660,7 @@ fn zirSwitchBlockMulti( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index); - const operand_ptr = try sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3651,14 +3681,14 @@ fn zirSwitchBlockMulti( fn analyzeSwitch( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, extra_end: usize, special_prong: Zir.SpecialProng, scalar_cases_len: usize, multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const gpa = sema.gpa; const mod = sema.mod; @@ -4217,7 +4247,7 @@ fn analyzeSwitch( const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); _ = try sema.resolveConstValue(&child_block, item.src, item); const cmp_ok = try case_block.addBinOp(item.src, bool_ty, .cmp_eq, operand, item); @@ -4235,8 +4265,8 @@ fn analyzeSwitch( const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; - const item_first = try sema.resolveInst(first_ref); - const item_last = try sema.resolveInst(last_ref); + const item_first = sema.resolveInst(first_ref); + const item_last = sema.resolveInst(last_ref); _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); @@ -4334,7 +4364,7 @@ fn resolveSwitchItemVal( switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, ) InnerError!TypedValue { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); // We have to avoid the other helper functions here because we cannot construct a LazySrcLoc // because we only have the switch AST node. Only if we know for sure we need to report // a compile error do we resolve the full source locations. 
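Nearly every remaining hunk in this file repeats the same mechanical rewrite, so it is worth spelling out once: `resolveInst` no longer returns an error union, so the `try` is dropped, and because an `Air.Inst.Ref` is a plain enum rather than a pointer, a result type can no longer be read off a `.ty` field and instead goes through helpers such as `sema.getTypeOfAirRef` (used in the resolveInstConst hunk above). A hedged before/after sketch of a typical handler follows; `zirExample` and its use of `un_node` data are illustrative stand-ins, not functions from this patch.

// Before: refs were instruction pointers; resolution could fail and the
// result carried its type directly in a `ty` field.
fn zirExampleBefore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
    _ = block;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    return operand; // operand.ty was available here
}

// After: resolveInst is infallible and returns an enum; the type is
// recovered through Sema instead of read off the ref.
fn zirExampleAfter(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
    _ = block;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = sema.resolveInst(inst_data.operand);
    const operand_ty = sema.getTypeOfAirRef(operand);
    _ = operand_ty; // a real handler would branch on this type
    return operand;
}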
@@ -4513,7 +4543,7 @@ fn validateSwitchNoRange(
     return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
 }
 
-fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     _ = extra;
@@ -4522,7 +4552,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{});
 }
 
-fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -4547,7 +4577,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return mod.constBool(arena, src, false);
 }
 
-fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4572,13 +4602,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return mod.constType(sema.arena, src, file_root_decl.ty);
 }
 
-fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     _ = block;
     _ = inst;
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{});
 }
 
-fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4587,7 +4617,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{});
 }
 
-fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4599,8 +4629,8 @@ fn zirBitwise(
     sema: *Sema,
     block: *Scope.Block,
     inst: Zir.Inst.Index,
-    ir_tag: ir.Inst.Tag,
-) InnerError!Air.Inst.Index {
+    air_tag: Air.Inst.Tag,
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4609,8 +4639,8 @@ fn zirBitwise(
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
-    const lhs = try sema.resolveInst(extra.lhs);
-    const rhs = try sema.resolveInst(extra.rhs);
+    const lhs = sema.resolveInst(extra.lhs);
+    const rhs = sema.resolveInst(extra.rhs);
 
     const instructions = &[_]Air.Inst.Index{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions);
@@ -4655,10 +4685,10 @@ fn zirBitwise(
     }
 
     try sema.requireRuntimeBlock(block, src);
-    return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs);
+    return block.addBinOp(src, scalar_type, air_tag, casted_lhs, casted_rhs);
 }
 
-fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();
 
@@ -4666,7 +4696,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{});
 }
 
-fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4674,7 +4704,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{});
 }
 
-fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4687,7 +4717,7 @@ fn zirNegate(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     tag_override: Zir.Inst.Tag,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4695,13 +4725,13 @@ fn zirNegate(
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
-    const lhs = try sema.resolveInst(.zero);
-    const rhs = try sema.resolveInst(inst_data.operand);
+    const lhs = sema.resolveInst(.zero);
+    const rhs = sema.resolveInst(inst_data.operand);
 
     return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src);
 }
 
-fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4711,8 +4741,8 @@ fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
-    const lhs = try sema.resolveInst(extra.lhs);
-    const rhs = try sema.resolveInst(extra.rhs);
+    const lhs = sema.resolveInst(extra.lhs);
+    const rhs = sema.resolveInst(extra.rhs);
 
     return sema.analyzeArithmetic(block, tag_override, lhs, rhs, sema.src, lhs_src, rhs_src);
 }
@@ -4721,7 +4751,7 @@ fn zirOverflowArithmetic(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4735,12 +4765,12 @@ fn analyzeArithmetic(
     sema: *Sema,
     block: *Scope.Block,
     zir_tag: Zir.Inst.Tag,
-    lhs: Air.Inst.Index,
-    rhs: Air.Inst.Index,
+    lhs: Air.Inst.Ref,
+    rhs: Air.Inst.Ref,
     src: LazySrcLoc,
     lhs_src: LazySrcLoc,
     rhs_src: LazySrcLoc,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const instructions = &[_]Air.Inst.Index{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions);
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
@@ -4850,14 +4880,14 @@ fn analyzeArithmetic(
     return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs);
 }
 
-fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const ptr_src: LazySrcLoc = .{ .node_offset_deref_ptr = inst_data.src_node };
-    const ptr = try sema.resolveInst(inst_data.operand);
+    const ptr = sema.resolveInst(inst_data.operand);
     return sema.analyzeLoad(block, src, ptr, ptr_src);
 }
 
@@ -4865,7 +4895,7 @@ fn zirAsm(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4915,7 +4945,7 @@ fn zirAsm(
         const name = sema.code.nullTerminatedString(input.data.name);
         _ = name; // TODO: use the name
 
-        arg.* = try sema.resolveInst(input.data.operand);
+        arg.* = sema.resolveInst(input.data.operand);
         inputs[arg_i] = sema.code.nullTerminatedString(input.data.constraint);
     }
 
@@ -4949,7 +4979,7 @@ fn zirCmp(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     op: std.math.CompareOperator,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4960,8 +4990,8 @@ fn zirCmp(
     const src: LazySrcLoc = inst_data.src();
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
-    const lhs = try sema.resolveInst(extra.lhs);
-    const rhs = try sema.resolveInst(extra.rhs);
+    const lhs = sema.resolveInst(extra.lhs);
+    const rhs = sema.resolveInst(extra.rhs);
 
     const is_equality_cmp = switch (op) {
         .eq, .neq => true,
@@ -5047,7 +5077,7 @@ fn zirCmp(
     return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs);
 }
 
-fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -5057,7 +5087,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size);
 }
 
-fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -5071,7 +5101,7 @@ fn zirThis(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{});
 }
@@ -5080,7 +5110,7 @@ fn zirRetAddr(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{});
 }
@@ -5089,12 +5119,12 @@ fn zirBuiltinSrc(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{});
 }
 
-fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const ty = try sema.resolveType(block, src, inst_data.operand);
@@ -5137,31 +5167,31 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     }
 }
 
-fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     _ = block;
     const zir_datas = sema.code.instructions.items(.data);
     const inst_data = zir_datas[inst].un_node;
     const src = inst_data.src();
-    const operand = try sema.resolveInst(inst_data.operand);
+    const operand = sema.resolveInst(inst_data.operand);
     return sema.mod.constType(sema.arena, src, operand.ty);
 }
 
-fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     _ = block;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
-    const operand_ptr = try sema.resolveInst(inst_data.operand);
+    const operand_ptr = sema.resolveInst(inst_data.operand);
     const elem_ty = operand_ptr.ty.elemType();
     return sema.mod.constType(sema.arena, src, elem_ty);
 }
 
-fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{});
 }
 
-fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{});
@@ -5171,7 +5201,7 @@ fn zirTypeofPeer(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5183,20 +5213,20 @@ fn zirTypeofPeer(
     defer sema.gpa.free(inst_list);
 
     for (args) |arg_ref, i| {
-        inst_list[i] = try sema.resolveInst(arg_ref);
+        inst_list[i] = sema.resolveInst(arg_ref);
     }
 
     const result_type = try sema.resolvePeerTypes(block, src, inst_list);
     return sema.mod.constType(sema.arena, src, result_type);
 }
 
-fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
-    const uncasted_operand = try sema.resolveInst(inst_data.operand);
+    const uncasted_operand = sema.resolveInst(inst_data.operand);
 
     const bool_type = Type.initTag(.bool);
     const operand = try sema.coerce(block, bool_type, uncasted_operand, uncasted_operand.src);
@@ -5212,16 +5242,16 @@ fn zirBoolOp(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     comptime is_bool_or: bool,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
     const src: LazySrcLoc = .unneeded;
     const bool_type = Type.initTag(.bool);
     const bin_inst = sema.code.instructions.items(.data)[inst].bin;
-    const uncasted_lhs = try sema.resolveInst(bin_inst.lhs);
+    const uncasted_lhs = sema.resolveInst(bin_inst.lhs);
     const lhs = try sema.coerce(block, bool_type, uncasted_lhs, uncasted_lhs.src);
-    const uncasted_rhs = try sema.resolveInst(bin_inst.rhs);
+    const uncasted_rhs = sema.resolveInst(bin_inst.rhs);
     const rhs = try sema.coerce(block, bool_type, uncasted_rhs, uncasted_rhs.src);
 
     if (lhs.value()) |lhs_val| {
@@ -5234,7 +5264,7 @@ fn zirBoolOp(
         }
     }
     try sema.requireRuntimeBlock(block, src);
-    const tag: ir.Inst.Tag = if (is_bool_or) .bool_or else .bool_and;
+    const tag: Air.Inst.Tag = if (is_bool_or) .bool_or else .bool_and;
     return block.addBinOp(src, bool_type, tag, lhs, rhs);
 }
 
@@ -5243,14 +5273,14 @@ fn zirBoolBr(
     parent_block: *Scope.Block,
     inst: Zir.Inst.Index,
     is_bool_or: bool,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
     const datas = sema.code.instructions.items(.data);
     const inst_data = datas[inst].bool_br;
     const src: LazySrcLoc = .unneeded;
-    const lhs = try sema.resolveInst(inst_data.lhs);
+    const lhs = sema.resolveInst(inst_data.lhs);
     const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
     const body = sema.code.extra[extra.end..][0..extra.data.body_len];
 
@@ -5313,13 +5343,13 @@ fn zirIsNonNull(
     sema: *Sema,
     block: *Scope.Block,
     inst: Zir.Inst.Index,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
-    const operand = try sema.resolveInst(inst_data.operand);
+    const operand = sema.resolveInst(inst_data.operand);
     return sema.analyzeIsNull(block, src, operand, true);
 }
 
@@ -5327,33 +5357,33 @@ fn zirIsNonNullPtr(
     sema: *Sema,
     block: *Scope.Block,
     inst: Zir.Inst.Index,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
-    const ptr = try sema.resolveInst(inst_data.operand);
+    const ptr = sema.resolveInst(inst_data.operand);
     const loaded = try sema.analyzeLoad(block, src, ptr, src);
     return sema.analyzeIsNull(block, src, loaded, true);
 }
 
-fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
-    const operand = try sema.resolveInst(inst_data.operand);
+    const operand = sema.resolveInst(inst_data.operand);
     return sema.analyzeIsNonErr(block, inst_data.src(), operand);
 }
 
-fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
-    const ptr = try sema.resolveInst(inst_data.operand);
+    const ptr = sema.resolveInst(inst_data.operand);
     const loaded = try sema.analyzeLoad(block, src, ptr, src);
     return sema.analyzeIsNonErr(block, src, loaded);
 }
@@ -5374,7 +5404,7 @@ fn zirCondbr(
     const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
     const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
 
-    const uncasted_cond = try sema.resolveInst(extra.data.condition);
+    const uncasted_cond = sema.resolveInst(extra.data.condition);
     const cond = try sema.coerce(parent_block, Type.initTag(.bool), uncasted_cond, cond_src);
 
     if (try sema.resolveDefinedValue(parent_block, src, cond)) |cond_val| {
@@ -5456,7 +5486,7 @@ fn zirRetCoerce(
     defer tracy.end();
 
     const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
-    const operand = try sema.resolveInst(inst_data.operand);
+    const operand = sema.resolveInst(inst_data.operand);
     const src = inst_data.src();
 
     return sema.analyzeRet(block, operand, src, need_coercion);
@@ -5467,7 +5497,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     defer tracy.end();
 
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
-    const operand = try sema.resolveInst(inst_data.operand);
+    const operand = sema.resolveInst(inst_data.operand);
     const src = inst_data.src();
 
     return sema.analyzeRet(block, operand, src, false);
@@ -5476,7 +5506,7 @@ fn analyzeRet(
     sema: *Sema,
     block: *Scope.Block,
-    operand: Air.Inst.Index,
+    operand: Air.Inst.Ref,
     src: LazySrcLoc,
     need_coercion: bool,
 ) InnerError!Zir.Inst.Index {
@@ -5511,7 +5541,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
     };
 }
 
-fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5532,7 +5562,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne
     return sema.mod.constType(sema.arena, .unneeded, ty);
 }
 
-fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5586,7 +5616,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.mod.constType(sema.arena, src, ty);
 }
 
-fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5600,13 +5630,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In
     });
 }
 
-fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{});
 }
 
-fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
+fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref {
     const mod = sema.mod;
     const gpa = sema.gpa;
     const zir_datas = sema.code.instructions.items(.data);
@@ -5657,7 +5687,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
             return mod.failWithOwnedErrorMsg(&block.base, msg);
         }
         found_fields[field_index] = item.data.field_type;
-        field_inits[field_index] = try sema.resolveInst(item.data.init);
+        field_inits[field_index] = sema.resolveInst(item.data.init);
     }
 
     var root_msg: ?*Module.ErrorMsg = null;
@@ -5719,7 +5749,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
     return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{});
 }
 
-fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
+fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5727,7 +5757,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{});
 }
 
-fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
+fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5735,7 +5765,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{});
 }
 
-fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
+fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5743,13 +5773,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{});
 }
 
-fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{});
 }
 
-fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -5771,7 +5801,7 @@ fn zirErrorReturnTrace(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{});
 }
@@ -5780,7 +5810,7 @@ fn zirFrame(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{});
 }
@@ -5789,91 +5819,91 @@ fn zirFrameAddress(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{});
 }
 
-fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{});
 }
 
-fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{});
 }
 
-fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{});
 }
 
-fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{});
 }
 
-fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{});
 }
 
-fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{});
 }
 
-fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{});
 }
 
-fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{});
 }
 
-fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{});
 }
 
-fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{});
 }
 
-fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{});
 }
 
-fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{});
 }
 
-fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
 
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
-    const operand_res = try sema.resolveInst(extra.rhs);
+    const operand_res = sema.resolveInst(extra.rhs);
     const operand_coerced = try sema.coerce(block, Type.initTag(.usize), operand_res, operand_src);
 
     const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -5929,199 +5959,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return block.addUnOp(src, type_res, .bitcast, operand_coerced);
 }
 
-fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{});
 }
 
-fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{});
 }
 
-fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{});
 }
 
-fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{});
 }
 
-fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{});
 }
 
-fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{});
 }
 
-fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{});
 }
 
-fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{});
 }
 
-fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{});
 }
 
-fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{});
 }
 
-fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{});
 }
 
-fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{});
 }
 
-fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{});
 }
 
-fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{});
 }
 
-fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{});
 }
 
-fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{});
 }
 
-fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{});
 }
 
-fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{});
 }
 
-fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{});
 }
 
-fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{});
 }
 
-fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{});
 }
 
-fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{});
 }
 
-fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{});
 }
 
-fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{});
 }
 
-fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{});
 }
 
-fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{});
 }
 
-fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{});
 }
 
-fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{});
 }
 
-fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{});
 }
 
-fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{});
 }
 
-fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{});
 }
 
-fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{});
 }
 
-fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
+fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{});
@@ -6132,7 +6162,7 @@ fn zirAwait(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     is_nosuspend: bool,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
 
@@ -6144,7 +6174,7 @@ fn zirVarExtended(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
     const src = sema.src;
     const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type
@@ -6210,7 +6240,7 @@ fn zirFuncExtended(
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
     inst: Zir.Inst.Index,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -6277,7 +6307,7 @@ fn zirCUndef(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{});
@@ -6287,7 +6317,7 @@ fn zirCInclude(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{});
@@ -6297,7 +6327,7 @@ fn zirCDefine(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{});
@@ -6307,7 +6337,7 @@ fn zirWasmMemorySize(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{});
@@ -6317,7 +6347,7 @@ fn zirWasmMemoryGrow(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{});
@@ -6327,7 +6357,7 @@ fn zirBuiltinExtern(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{});
@@ -6361,7 +6391,7 @@ pub const PanicId = enum {
     invalid_error_code,
 };
 
-fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void {
+fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Ref, panic_id: PanicId) !void {
     const block_inst = try sema.arena.create(Inst.Block);
     block_inst.* = .{
         .base = .{
@@ -6423,7 +6453,7 @@ fn panicWithMsg(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    msg_inst: Air.Inst.Index,
+    msg_inst: Air.Inst.Ref,
 ) !Zir.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
@@ -6439,7 +6469,7 @@ fn panicWithMsg(
     const panic_fn = try sema.getBuiltin(block, src, "panic");
     const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace");
     const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
-    const ptr_stack_trace_ty = try mod.simplePtrType(arena, stack_trace_ty, true, .One);
+    const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One);
     const null_stack_trace = try mod.constInst(arena, src, .{
         .ty = try mod.optionalType(arena, ptr_stack_trace_ty),
         .val = Value.initTag(.null_value),
@@ -6500,10 +6530,10 @@ fn namedFieldPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    object_ptr: Air.Inst.Index,
+    object_ptr: Air.Inst.Ref,
     field_name: []const u8,
     field_name_src: LazySrcLoc,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const mod = sema.mod;
     const arena = sema.arena;
 
@@ -6579,7 +6609,7 @@ fn namedFieldPtr(
                 } else (try mod.getErrorValue(field_name)).key;
 
                 return mod.constInst(arena, src, .{
-                    .ty = try mod.simplePtrType(arena, child_type, false, .One),
+                    .ty = try Module.simplePtrType(arena, child_type, false, .One),
                     .val = try Value.Tag.ref_val.create(
                         arena,
                         try Value.Tag.@"error".create(arena, .{
@@ -6633,7 +6663,7 @@ fn namedFieldPtr(
                 const field_index_u32 = @intCast(u32, field_index);
                 const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32);
                 return mod.constInst(arena, src, .{
-                    .ty = try mod.simplePtrType(arena, child_type, false, .One),
+                    .ty = try Module.simplePtrType(arena, child_type, false, .One),
                     .val = try Value.Tag.ref_val.create(arena, enum_val),
                 });
             },
@@ -6653,7 +6683,7 @@ fn analyzeNamespaceLookup(
     src: LazySrcLoc,
     namespace: *Scope.Namespace,
     decl_name: []const u8,
-) InnerError!?Air.Inst.Index {
+) InnerError!?Air.Inst.Ref {
     const mod = sema.mod;
     const gpa = sema.gpa;
     if (try sema.lookupInNamespace(namespace, decl_name)) |decl| {
@@ -6677,11 +6707,11 @@ fn analyzeStructFieldPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    struct_ptr: Air.Inst.Index,
+    struct_ptr: Air.Inst.Ref,
     field_name: []const u8,
     field_name_src: LazySrcLoc,
     unresolved_struct_ty: Type,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const mod = sema.mod;
     const arena = sema.arena;
     assert(unresolved_struct_ty.zigTypeTag() == .Struct);
@@ -6692,7 +6722,7 @@ fn analyzeStructFieldPtr(
     const field_index = struct_obj.fields.getIndex(field_name) orelse
         return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name);
     const field = struct_obj.fields.values()[field_index];
-    const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One);
+    const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One);
 
     if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
         return mod.constInst(arena, src, .{
@@ -6712,11 +6742,11 @@ fn analyzeUnionFieldPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    union_ptr: Air.Inst.Index,
+    union_ptr: Air.Inst.Ref,
     field_name: []const u8,
     field_name_src: LazySrcLoc,
     unresolved_union_ty: Type,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const mod = sema.mod;
     const arena = sema.arena;
     assert(unresolved_union_ty.zigTypeTag() == .Union);
@@ -6728,7 +6758,7 @@ fn analyzeUnionFieldPtr(
         return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name);
 
     const field = union_obj.fields.values()[field_index];
-    const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One);
+    const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One);
 
     if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| {
         // TODO detect inactive union field and emit compile error
@@ -6749,10 +6779,10 @@ fn elemPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    array_ptr: Air.Inst.Index,
-    elem_index: Air.Inst.Index,
+    array_ptr: Air.Inst.Ref,
+    elem_index: Air.Inst.Ref,
     elem_index_src: LazySrcLoc,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const array_ty = switch (array_ptr.ty.zigTypeTag()) {
         .Pointer => array_ptr.ty.elemType(),
         else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}),
@@ -6776,10 +6806,10 @@ fn elemPtrArray(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    array_ptr: Air.Inst.Index,
-    elem_index: Air.Inst.Index,
+    array_ptr: Air.Inst.Ref,
+    elem_index: Air.Inst.Ref,
     elem_index_src: LazySrcLoc,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     if (array_ptr.value()) |array_ptr_val| {
         if (elem_index.value()) |index_val| {
             // Both array pointer and index are compile-time known.
@@ -6804,35 +6834,41 @@ fn coerce(
     sema: *Sema,
     block: *Scope.Block,
     dest_type: Type,
-    inst: Air.Inst.Index,
+    inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     if (dest_type.tag() == .var_args_param) {
-        return sema.coerceVarArgParam(block, inst);
+        return sema.coerceVarArgParam(block, inst, inst_src);
     }
+
+    const inst_ty = sema.getTypeOfAirRef(inst);
 
     // If the types are the same, we can return the operand.
-    if (dest_type.eql(inst.ty))
+    if (dest_type.eql(inst_ty))
         return inst;
 
-    const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty);
+    const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty);
     if (in_memory_result == .ok) {
-        return sema.bitcast(block, dest_type, inst);
+        return sema.bitcast(block, dest_type, inst, inst_src);
    }
 
     const mod = sema.mod;
     const arena = sema.arena;
 
     // undefined to anything
-    if (inst.value()) |val| {
-        if (val.isUndef() or inst.ty.zigTypeTag() == .Undefined) {
-            return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = val });
+    if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| {
+        if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) {
+            return sema.addConstant(dest_type, val);
         }
     }
-    assert(inst.ty.zigTypeTag() != .Undefined);
+    assert(inst_ty.zigTypeTag() != .Undefined);
+
+    if (true) {
+        @panic("TODO finish AIR memory layout rework");
+    }
 
     // T to E!T or E to E!T
     if (dest_type.tag() == .error_union) {
-        return try sema.wrapErrorUnion(block, dest_type, inst);
+        return try sema.wrapErrorUnion(block, dest_type, inst, inst_src);
     }
 
     // comptime known number to other number
@@ -6844,14 +6880,14 @@ fn coerce(
     switch (dest_type.zigTypeTag()) {
         .Optional => {
             // null to ?T
-            if (inst.ty.zigTypeTag() == .Null) {
+            if (inst_ty.zigTypeTag() == .Null) {
                 return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = Value.initTag(.null_value) });
             }
 
             // T to ?T
             var buf: Type.Payload.ElemType = undefined;
             const child_type = dest_type.optionalChild(&buf);
-            if (child_type.eql(inst.ty)) {
+            if (child_type.eql(inst_ty)) {
                 return sema.wrapOptional(block, dest_type, inst);
             } else if (try sema.coerceNum(block, child_type, inst)) |some| {
                 return sema.wrapOptional(block, dest_type, some);
@@ -6860,12 +6896,12 @@ fn coerce(
         .Pointer => {
             // Coercions where the source is a single pointer to an array.
            src_array_ptr: {
-                if (!inst.ty.isSinglePointer()) break :src_array_ptr;
-                const array_type = inst.ty.elemType();
+                if (!inst_ty.isSinglePointer()) break :src_array_ptr;
+                const array_type = inst_ty.elemType();
                 if (array_type.zigTypeTag() != .Array) break :src_array_ptr;
                 const array_elem_type = array_type.elemType();
-                if (inst.ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr;
-                if (inst.ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr;
+                if (inst_ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr;
+                if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr;
 
                 const dst_elem_type = dest_type.elemType();
                 switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type)) {
@@ -6904,11 +6940,11 @@ fn coerce(
         },
         .Int => {
             // integer widening
-            if (inst.ty.zigTypeTag() == .Int) {
+            if (inst_ty.zigTypeTag() == .Int) {
                 assert(inst.value() == null); // handled above
 
                 const dst_info = dest_type.intInfo(target);
-                const src_info = inst.ty.intInfo(target);
+                const src_info = inst_ty.intInfo(target);
                 if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
                     // small enough unsigned ints can get casted to large enough signed ints
                     (src_info.signedness == .signed and dst_info.signedness == .unsigned and dst_info.bits > src_info.bits))
@@ -6920,10 +6956,10 @@ fn coerce(
         },
         .Float => {
             // float widening
-            if (inst.ty.zigTypeTag() == .Float) {
+            if (inst_ty.zigTypeTag() == .Float) {
                 assert(inst.value() == null); // handled above
 
-                const src_bits = inst.ty.floatBits(target);
+                const src_bits = inst_ty.floatBits(target);
                 const dst_bits = dest_type.floatBits(target);
                 if (dst_bits >= src_bits) {
                     try sema.requireRuntimeBlock(block, inst_src);
@@ -6933,7 +6969,7 @@ fn coerce(
         },
         .Enum => {
             // enum literal to enum
-            if (inst.ty.zigTypeTag() == .EnumLiteral) {
+            if (inst_ty.zigTypeTag() == .EnumLiteral) {
                 const val = try sema.resolveConstValue(block, inst_src, inst);
                 const bytes = val.castTag(.enum_literal).?.data;
                 const resolved_dest_type = try sema.resolveTypeFields(block, inst_src, dest_type);
@@ -6965,7 +7001,7 @@ fn coerce(
         else => {},
     }
 
-    return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst.ty });
+    return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst_ty });
 }
 
 const InMemoryCoercionResult = enum {
@@ -6982,7 +7018,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult
     return .no_match;
 }
 
-fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index {
+fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!?Air.Inst.Index {
     const val = inst.value() orelse return null;
     const src_zig_tag = inst.ty.zigTypeTag();
     const dst_zig_tag = dest_type.zigTypeTag();
@@ -7020,9 +7056,15 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.I
     return null;
 }
 
-fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index {
-    switch (inst.ty.zigTypeTag()) {
-        .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}),
+fn coerceVarArgParam(
+    sema: *Sema,
+    block: *Scope.Block,
+    inst: Air.Inst.Ref,
+    inst_src: LazySrcLoc,
+) !Air.Inst.Ref {
+    const inst_ty = sema.getTypeOfAirRef(inst);
+    switch (inst_ty.zigTypeTag()) {
+        .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst_src, "integer and float literals in var args function must be casted", .{}),
         else => {},
     }
     // TODO implement more of this function.
@@ -7033,8 +7075,8 @@ fn storePtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    ptr: Air.Inst.Index,
-    uncasted_value: Air.Inst.Index,
+    ptr: Air.Inst.Ref,
+    uncasted_value: Air.Inst.Ref,
 ) !void {
     if (ptr.ty.isConstPtr())
         return sema.mod.fail(&block.base, src, "cannot assign to constant", .{});
@@ -7082,17 +7124,23 @@ fn storePtr(
     _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value);
 }
 
-fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
-    if (inst.value()) |val| {
+fn bitcast(
+    sema: *Sema,
+    block: *Scope.Block,
+    dest_type: Type,
+    inst: Air.Inst.Ref,
+    inst_src: LazySrcLoc,
+) InnerError!Air.Inst.Ref {
+    if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| {
         // Keep the comptime Value representation; take the new type.
-        return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val });
+        return sema.addConstant(dest_type, val);
     }
     // TODO validate the type size and other compile errors
-    try sema.requireRuntimeBlock(block, inst.src);
-    return block.addUnOp(inst.src, dest_type, .bitcast, inst);
+    try sema.requireRuntimeBlock(block, inst_src);
+    return block.addTyOp(.bitcast, dest_type, inst);
 }
 
-fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
+fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!Air.Inst.Ref {
     if (inst.value()) |val| {
         // The comptime Value representation is compatible with both types.
         return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val });
@@ -7100,7 +7148,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst
     return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{});
 }
 
-fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
+fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Ref {
     if (inst.value()) |val| {
         // The comptime Value representation is compatible with both types.
        return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val });
@@ -7108,12 +7156,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst:
     return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{});
 }
 
-fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index {
+fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref {
     const decl_ref = try sema.analyzeDeclRef(block, src, decl);
     return sema.analyzeLoad(block, src, decl_ref, src);
 }
 
-fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index {
+fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref {
     try sema.mod.declareDeclDependency(sema.owner_decl, decl);
     sema.mod.ensureDeclAnalyzed(decl) catch |err| {
         if (sema.func) |func| {
@@ -7128,43 +7176,41 @@ fn analyzeDeclRef(
     if (decl_tv.val.tag() == .variable) {
         return sema.analyzeVarRef(block, src, decl_tv);
     }
-    return sema.mod.constInst(sema.arena, src, .{
-        .ty = try sema.mod.simplePtrType(sema.arena, decl_tv.ty, false, .One),
-        .val = try Value.Tag.decl_ref.create(sema.arena, decl),
-    });
+    return sema.addConstant(
+        try Module.simplePtrType(sema.arena, decl_tv.ty, false, .One),
+        try Value.Tag.decl_ref.create(sema.arena, decl),
+    );
 }
 
-fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index {
+fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Ref {
     const variable = tv.val.castTag(.variable).?.data;
 
-    const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One);
+    const ty = try Module.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One);
     if (!variable.is_mutable and !variable.is_extern) {
-        return sema.mod.constInst(sema.arena, src, .{
-            .ty = ty,
-            .val = try Value.Tag.ref_val.create(sema.arena, variable.init),
-        });
+        return sema.addConstant(ty, try Value.Tag.ref_val.create(sema.arena, variable.init));
    }
 
+    const gpa = sema.gpa;
     try sema.requireRuntimeBlock(block, src);
-    const inst = try sema.arena.create(Inst.VarPtr);
-    inst.* = .{
-        .base = .{
-            .tag = .varptr,
-            .ty = ty,
-            .src = src,
-        },
-        .variable = variable,
-    };
-    try block.instructions.append(sema.gpa, &inst.base);
-    return &inst.base;
+    try sema.air_variables.append(gpa, variable);
+    const result_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+    try sema.air_instructions.append(gpa, .{
+        .tag = .varptr,
+        .data = .{ .ty_pl = .{
+            .ty = try sema.addType(ty),
+            .payload = @intCast(u32, sema.air_variables.items.len - 1),
+        } },
+    });
+    try block.instructions.append(gpa, result_inst);
+    return indexToRef(result_inst);
 }
 
 fn analyzeRef(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    operand: Air.Inst.Index,
-) InnerError!Air.Inst.Index {
+    operand: Air.Inst.Ref,
+) InnerError!Air.Inst.Ref {
     const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One);
 
     if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| {
@@ -7182,34 +7228,32 @@ fn analyzeLoad(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    ptr: Air.Inst.Index,
+    ptr: Air.Inst.Ref,
     ptr_src: LazySrcLoc,
-) InnerError!Air.Inst.Index {
-    const elem_ty = switch (ptr.ty.zigTypeTag()) {
-        .Pointer => ptr.ty.elemType(),
-        else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}),
+) InnerError!Air.Inst.Ref {
+    const ptr_ty = sema.getTypeOfAirRef(ptr);
+    const elem_ty = switch (ptr_ty.zigTypeTag()) {
+        .Pointer => ptr_ty.elemType(),
+        else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr_ty}),
     };
     if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| blk: {
         if (ptr_val.tag() == .int_u64) break :blk; // do it at runtime
-        return sema.mod.constInst(sema.arena, src, .{
-            .ty = elem_ty,
-            .val = try ptr_val.pointerDeref(sema.arena),
-        });
+        return sema.addConstant(elem_ty, try ptr_val.pointerDeref(sema.arena));
    }
 
     try sema.requireRuntimeBlock(block, src);
-    return block.addUnOp(src, elem_ty, .load, ptr);
+    return block.addTyOp(.load, elem_ty, ptr);
 }
 
 fn analyzeIsNull(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    operand: Air.Inst.Index,
+    operand: Air.Inst.Ref,
     invert_logic: bool,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const result_ty = Type.initTag(.bool);
     if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| {
         if (opt_val.isUndef()) {
@@ -7228,8 +7272,8 @@ fn analyzeIsNonErr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    operand: Air.Inst.Index,
-) InnerError!Air.Inst.Index {
+    operand: Air.Inst.Ref,
+) InnerError!Air.Inst.Ref {
     const ot = operand.ty.zigTypeTag();
     if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true);
     if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false);
@@ -7249,12 +7293,12 @@ fn analyzeSlice(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    array_ptr: Air.Inst.Index,
-    start: Air.Inst.Index,
+    array_ptr: Air.Inst.Ref,
+    start: Air.Inst.Ref,
     end_opt: ?Air.Inst.Index,
     sentinel_opt: ?Air.Inst.Index,
     sentinel_src: LazySrcLoc,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const ptr_child = switch (array_ptr.ty.zigTypeTag()) {
         .Pointer => array_ptr.ty.elemType(),
         else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}),
@@ -7325,10 +7369,10 @@ fn cmpNumeric(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    lhs: Air.Inst.Index,
-    rhs: Air.Inst.Index,
+    lhs: Air.Inst.Ref,
+    rhs: Air.Inst.Ref,
     op: std.math.CompareOperator,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     assert(lhs.ty.isNumeric());
     assert(rhs.ty.isNumeric());
 
@@ -7494,7 +7538,7 @@ fn cmpNumeric(
     return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
 }
 
-fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
+fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index {
     if (inst.value()) |val| {
         return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val });
     }
@@ -7503,9 +7547,15 @@ fn wrapOptional(
     return block.addUnOp(inst.src, dest_type, .wrap_optional, inst);
 }
 
-fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
+fn wrapErrorUnion(
+    sema: *Sema,
+    block: *Scope.Block,
+    dest_type: Type,
+    inst: Air.Inst.Ref,
+    inst_src: LazySrcLoc,
+) !Air.Inst.Index {
     const err_union = dest_type.castTag(.error_union).?;
-    if (inst.value()) |val| {
+    if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| {
         if (inst.ty.zigTypeTag() != .ErrorSet) {
             _ = try sema.coerce(block, err_union.data.payload, inst, inst.src);
         } else switch (err_union.data.error_set.tag()) {
@@ -7710,7 +7760,7 @@ fn getBuiltin(
     block: *Scope.Block,
     src: LazySrcLoc,
     name: []const u8,
-) InnerError!Air.Inst.Index {
+) InnerError!Air.Inst.Ref {
     const mod = sema.mod;
     const std_pkg = mod.root_pkg.table.get("std").?;
     const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
@@ -7938,6 +7988,68 @@ fn enumFieldSrcLoc(
     } else unreachable;
 }
 
+/// Returns the type of the AIR instruction.
+fn getTypeOfAirRef(sema: *Sema, air_ref: Air.Inst.Ref) Type {
+    switch (air_ref) {
+        .none => unreachable,
+        .u8_type => return Type.initTag(.u8),
+        .i8_type => return Type.initTag(.i8),
+        .u16_type => return Type.initTag(.u16),
+        .i16_type => return Type.initTag(.i16),
+        .u32_type => return Type.initTag(.u32),
+        .i32_type => return Type.initTag(.i32),
+        .u64_type => return Type.initTag(.u64),
+        .i64_type => return Type.initTag(.i64),
+        .u128_type => return Type.initTag(.u128),
+        .i128_type => return Type.initTag(.i128),
+        .usize_type => return Type.initTag(.usize),
+        .isize_type => return Type.initTag(.isize),
+        .c_short_type => return Type.initTag(.c_short),
+        .c_ushort_type => return Type.initTag(.c_ushort),
+        .c_int_type => return Type.initTag(.c_int),
+        .c_uint_type => return Type.initTag(.c_uint),
+        .c_long_type => return Type.initTag(.c_long),
+        .c_ulong_type => return Type.initTag(.c_ulong),
+        .c_longlong_type => return Type.initTag(.c_longlong),
+        .c_ulonglong_type => return Type.initTag(.c_ulonglong),
+        .c_longdouble_type => return Type.initTag(.c_longdouble),
+        .f16_type => return Type.initTag(.f16),
+        .f32_type => return Type.initTag(.f32),
+        .f64_type => return Type.initTag(.f64),
+        .f128_type => return Type.initTag(.f128),
+        .c_void_type => return Type.initTag(.c_void),
+        .bool_type => return Type.initTag(.bool),
+        .void_type => return Type.initTag(.void),
+        .type_type => return Type.initTag(.type),
+        .anyerror_type => return Type.initTag(.anyerror),
+        .comptime_int_type => return Type.initTag(.comptime_int),
+        .comptime_float_type => return Type.initTag(.comptime_float),
+        .noreturn_type => return Type.initTag(.noreturn),
+        .anyframe_type => return Type.initTag(.@"anyframe"),
+        .null_type => return Type.initTag(.@"null"),
+        .undefined_type => return Type.initTag(.@"undefined"),
+        .enum_literal_type => return Type.initTag(.enum_literal),
+        .atomic_ordering_type => return Type.initTag(.atomic_ordering),
+        .atomic_rmw_op_type => return Type.initTag(.atomic_rmw_op),
+        .calling_convention_type => return Type.initTag(.calling_convention),
+        .float_mode_type => return Type.initTag(.float_mode),
+        .reduce_op_type => return Type.initTag(.reduce_op),
+        .call_options_type => return Type.initTag(.call_options),
+        .export_options_type => return Type.initTag(.export_options),
+        .extern_options_type => return Type.initTag(.extern_options),
+        .manyptr_u8_type => return Type.initTag(.manyptr_u8),
+        .manyptr_const_u8_type => return Type.initTag(.manyptr_const_u8),
+        .fn_noreturn_no_args_type => return Type.initTag(.fn_noreturn_no_args),
+        .fn_void_no_args_type => return Type.initTag(.fn_void_no_args),
+        .fn_naked_noreturn_no_args_type => return Type.initTag(.fn_naked_noreturn_no_args),
+        .fn_ccc_void_no_args_type => return Type.initTag(.fn_ccc_void_no_args),
+        .single_const_pointer_to_comptime_int_type => return Type.initTag(.single_const_pointer_to_comptime_int),
+        .const_slice_u8_type => return Type.initTag(.const_slice_u8),
+        else => return sema.getAirType(air_ref),
+    }
+}
+
+/// Asserts the AIR instruction is a `const_ty` and returns
the type. fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { var i: usize = @enumToInt(air_ref); if (i < Air.Inst.Ref.typed_value_map.len) { @@ -8014,13 +8126,27 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } +pub fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { + const gpa = sema.gpa; + const ty_inst = try sema.addType(ty); + try sema.air_values.append(gpa, val); + try sema.air_instructions.append(gpa, .{ + .tag = .constant, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); +} + const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; -fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { +pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { return @intToEnum(Air.Inst.Ref, ref_start_index + inst); } -fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { +pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { const ref_int = @enumToInt(inst); if (ref_int >= ref_start_index) { return ref_int - ref_start_index; diff --git a/src/codegen.zig b/src/codegen.zig index a6c4b5ad3c..c27a1444ef 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -494,7 +494,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { defer function.blocks.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); - var call_info = function.resolveCallingConventionValues(src_loc.lazy, fn_type) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, else => |e| return e, }; @@ -537,7 +537,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.items.len += 4; try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); const stack_end = self.max_end_stack; if (stack_end > math.maxInt(i32)) @@ -578,7 +578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, @@ -758,11 +758,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // TODO inline this logic into every instruction - var i: ir.Inst.DeathsBitIndex = 0; - while (inst.getOperand(i)) |operand| : (i += 1) { - if (inst.operandDies(i)) - self.processDeath(operand); - } + @panic("TODO rework AIR memory layout codegen for processing deaths"); + //var i: ir.Inst.DeathsBitIndex = 0; + //while (inst.getOperand(i)) |operand| : (i += 1) { + // if (inst.operandDies(i)) + // self.processDeath(operand); + //} } } @@ -858,74 +859,76 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const air_tags = self.air.instructions.items(.tag); switch (air_tags[inst]) { // zig fmt: off - .add => return self.genAdd(inst.castTag(.add).?), - .addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - .sub => return self.genSub(inst.castTag(.sub).?), - .subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - .mul => return self.genMul(inst.castTag(.mul).?), - .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - .div => return self.genDiv(inst.castTag(.div).?), - - .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_eq => return 
self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), - - .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - .xor => return self.genXor(inst.castTag(.xor).?), - - .alloc => return self.genAlloc(inst.castTag(.alloc).?), - .arg => return self.genArg(inst.castTag(.arg).?), - .assembly => return self.genAsm(inst.castTag(.assembly).?), - .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - .block => return self.genBlock(inst.castTag(.block).?), - .br => return self.genBr(inst.castTag(.br).?), - .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - .breakpoint => return self.genBreakpoint(inst.src), - .call => return self.genCall(inst.castTag(.call).?), - .cond_br => return self.genCondBr(inst.castTag(.condbr).?), - .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - .intcast => return self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), - .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - .is_null => return self.genIsNull(inst.castTag(.is_null).?), - .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - .is_err => return self.genIsErr(inst.castTag(.is_err).?), - .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - .load => return self.genLoad(inst.castTag(.load).?), - .loop => return self.genLoop(inst.castTag(.loop).?), - .not => return self.genNot(inst.castTag(.not).?), - .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - .ref => return self.genRef(inst.castTag(.ref).?), - .ret => return self.genRet(inst.castTag(.ret).?), - .store => return self.genStore(inst.castTag(.store).?), - .struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .switchbr => return self.genSwitch(inst.castTag(.switchbr).?), - .varptr => return self.genVarPtr(inst.castTag(.varptr).?), - - .constant => unreachable, // excluded from function bodies - .unreach => return MCValue{ .unreach = {} }, - - .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - .unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - - .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), - .wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), + //.add => return 
self.genAdd(inst.castTag(.add).?), + //.addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), + //.sub => return self.genSub(inst.castTag(.sub).?), + //.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), + //.mul => return self.genMul(inst.castTag(.mul).?), + //.mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), + //.div => return self.genDiv(inst.castTag(.div).?), + + //.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), + //.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), + //.cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), + //.cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), + //.cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), + //.cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), + + //.bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), + //.bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), + //.bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), + //.bit_or => return self.genBitOr(inst.castTag(.bit_or).?), + //.xor => return self.genXor(inst.castTag(.xor).?), + + //.alloc => return self.genAlloc(inst.castTag(.alloc).?), + //.arg => return self.genArg(inst.castTag(.arg).?), + //.assembly => return self.genAsm(inst.castTag(.assembly).?), + //.bitcast => return self.genBitCast(inst.castTag(.bitcast).?), + //.block => return self.genBlock(inst.castTag(.block).?), + //.br => return self.genBr(inst.castTag(.br).?), + //.br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), + //.breakpoint => return self.genBreakpoint(inst.src), + //.call => return self.genCall(inst.castTag(.call).?), + //.cond_br => return self.genCondBr(inst.castTag(.condbr).?), + //.dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), + //.floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), + //.intcast => return self.genIntCast(inst.castTag(.intcast).?), + //.is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), + //.is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), + //.is_null => return self.genIsNull(inst.castTag(.is_null).?), + //.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), + //.is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), + //.is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), + //.is_err => return self.genIsErr(inst.castTag(.is_err).?), + //.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), + //.load => return self.genLoad(inst.castTag(.load).?), + //.loop => return self.genLoop(inst.castTag(.loop).?), + //.not => return self.genNot(inst.castTag(.not).?), + //.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), + //.ref => return self.genRef(inst.castTag(.ref).?), + //.ret => return self.genRet(inst.castTag(.ret).?), + //.store => return self.genStore(inst.castTag(.store).?), + //.struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + //.switch_br => return self.genSwitch(inst.castTag(.switchbr).?), + //.varptr => return self.genVarPtr(inst.castTag(.varptr).?), + + //.constant => unreachable, // excluded from function bodies + //.unreach => return MCValue{ .unreach = {} }, + + //.optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), + //.optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), + //.unwrap_errunion_err => return 
self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), + //.unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), + + //.wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), + //.wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), // zig fmt: on + + else => @panic("TODO finish air memory layout branch, more codegen.zig instructions"), } } @@ -4785,14 +4788,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; } - fn fail(self: *Self, src: LazySrcLoc, comptime format: []const u8, args: anytype) InnerError { + fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { @setCold(true); assert(self.err_msg == null); - const src_loc = if (src != .unneeded) - src.toSrcLocWithDecl(self.mod_fn.owner_decl) - else - self.src_loc; - self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src_loc, format, args); + self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args); return error.CodegenFail; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4743494f35..0ee6972654 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -25,7 +25,7 @@ pub const CValue = union(enum) { /// Index into local_names, but take the address. local_ref: usize, /// A constant instruction, to be rendered inline. - constant: *Inst, + constant: Air.Inst.Index, /// Index into the parameters arg: usize, /// By-value @@ -99,7 +99,7 @@ pub const Object = struct { gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, - blocks: std.AutoHashMapUnmanaged(*ir.Inst.Block, BlockData) = .{}, + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, next_arg_index: usize = 0, next_local_index: usize = 0, next_block_index: usize = 0, @@ -133,7 +133,12 @@ pub const Object = struct { .none => unreachable, .local => |i| return w.print("t{d}", .{i}), .local_ref => |i| return w.print("&t{d}", .{i}), - .constant => |inst| return o.dg.renderValue(w, inst.ty, inst.value().?), + .constant => |inst| { + const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; + const ty = o.air.getRefType(ty_pl.ty); + const val = o.air.values[ty_pl.payload]; + return o.dg.renderValue(w, ty, val); + }, .arg => |i| return w.print("a{d}", .{i}), .decl => |decl| return w.writeAll(mem.span(decl.name)), .decl_ref => |decl| return w.print("&{s}", .{decl.name}), @@ -213,8 +218,9 @@ pub const DeclGen = struct { error_msg: ?*Module.ErrorMsg, typedefs: TypedefMap, - fn fail(dg: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { + fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(dg.decl); dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args); return error.AnalysisFail; @@ -230,7 +236,7 @@ pub const DeclGen = struct { // This should lower to 0xaa bytes in safe modes, and for unsafe modes should // lower to leaving variables uninitialized (that might need to be implemented // outside of this function). 
- return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement renderValue undef", .{}); + return dg.fail("TODO: C backend: implement renderValue undef", .{}); } switch (t.zigTypeTag()) { .Int => { @@ -440,7 +446,7 @@ pub const DeclGen = struct { }, else => unreachable, }, - else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{ + else => |e| return dg.fail("TODO: C backend: implement value {s}", .{ @tagName(e), }), } @@ -519,14 +525,14 @@ pub const DeclGen = struct { break; } } else { - return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement integer types larger than 128 bits", .{}); + return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); } }, else => unreachable, } }, - .Float => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Float", .{}), + .Float => return dg.fail("TODO: C backend: implement type Float", .{}), .Pointer => { if (t.isSlice()) { @@ -681,7 +687,7 @@ pub const DeclGen = struct { try dg.renderType(w, int_tag_ty); }, - .Union => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Union", .{}), + .Union => return dg.fail("TODO: C backend: implement type Union", .{}), .Fn => { try dg.renderType(w, t.fnReturnType()); try w.writeAll(" (*)("); @@ -704,10 +710,10 @@ pub const DeclGen = struct { } try w.writeByte(')'); }, - .Opaque => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Opaque", .{}), - .Frame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Frame", .{}), - .AnyFrame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type AnyFrame", .{}), - .Vector => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Vector", .{}), + .Opaque => return dg.fail("TODO: C backend: implement type Opaque", .{}), + .Frame => return dg.fail("TODO: C backend: implement type Frame", .{}), + .AnyFrame => return dg.fail("TODO: C backend: implement type AnyFrame", .{}), + .Vector => return dg.fail("TODO: C backend: implement type Vector", .{}), .Null, .Undefined, @@ -760,7 +766,8 @@ pub fn genDecl(o: *Object) !void { try o.dg.renderFunctionSignature(o.writer(), is_global); try o.writer().writeByte(' '); - try genBody(o, func.body); + const main_body = o.air.getMainBody(); + try genBody(o, main_body); try o.indent_writer.insertNewline(); return; @@ -833,9 +840,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { } } -pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!void { +fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { const writer = o.writer(); - if (body.instructions.len == 0) { + if (body.len == 0) { try writer.writeAll("{}"); return; } @@ -843,82 +850,85 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi try writer.writeAll("{\n"); o.indent_writer.pushIndent(); - for (body.instructions) |inst| { - const result_value = switch (inst.tag) { - // TODO use a different strategy for add that communicates to the optimizer - // that wrapping is UB. - .add => try genBinOp(o, inst.castTag(.add).?, " + "), - .addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), - // TODO use a different strategy for sub that communicates to the optimizer - // that wrapping is UB. 
- .sub => try genBinOp(o, inst.castTag(.sub).?, " - "), - .subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), - // TODO use a different strategy for mul that communicates to the optimizer - // that wrapping is UB. - .mul => try genBinOp(o, inst.castTag(.sub).?, " * "), - .mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), - // TODO use a different strategy for div that communicates to the optimizer - // that wrapping is UB. - .div => try genBinOp(o, inst.castTag(.div).?, " / "), - - .constant => unreachable, // excluded from function bodies - .alloc => try genAlloc(o, inst.castTag(.alloc).?), - .arg => genArg(o), - .assembly => try genAsm(o, inst.castTag(.assembly).?), - .block => try genBlock(o, inst.castTag(.block).?), - .bitcast => try genBitcast(o, inst.castTag(.bitcast).?), - .breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), - .call => try genCall(o, inst.castTag(.call).?), - .cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), - .cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), - .cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), - .cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), - .cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), - .cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), - .dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), - .intcast => try genIntCast(o, inst.castTag(.intcast).?), - .load => try genLoad(o, inst.castTag(.load).?), - .ret => try genRet(o, inst.castTag(.ret).?), - .retvoid => try genRetVoid(o), - .store => try genStore(o, inst.castTag(.store).?), - .unreach => try genUnreach(o, inst.castTag(.unreach).?), - .loop => try genLoop(o, inst.castTag(.loop).?), - .condbr => try genCondBr(o, inst.castTag(.condbr).?), - .br => try genBr(o, inst.castTag(.br).?), - .br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), - .switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), - // bool_and and bool_or are non-short-circuit operations - .bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), - .bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), - .bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "), - .bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), - .xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), - .not => try genUnOp(o, inst.castTag(.not).?, "!"), - .is_null => try genIsNull(o, inst.castTag(.is_null).?), - .is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), - .is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), - .is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), - .wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), - .optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), - .optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), - .ref => try genRef(o, inst.castTag(.ref).?), - .struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), - - .is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), - .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), - .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), - .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), - - .unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), 
- .unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), - .unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), - .wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), - .br_block_flat => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for br_block_flat", .{}), - .ptrtoint => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for ptrtoint", .{}), - .varptr => try genVarPtr(o, inst.castTag(.varptr).?), - .floatcast => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for floatcast", .{}), + const air_tags = o.air.instructions.items(.tag); + + for (body) |inst| { + const result_value = switch (air_tags[inst]) { + //// TODO use a different strategy for add that communicates to the optimizer + //// that wrapping is UB. + //.add => try genBinOp(o, inst.castTag(.add).?, " + "), + //.addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), + //// TODO use a different strategy for sub that communicates to the optimizer + //// that wrapping is UB. + //.sub => try genBinOp(o, inst.castTag(.sub).?, " - "), + //.subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), + //// TODO use a different strategy for mul that communicates to the optimizer + //// that wrapping is UB. + //.mul => try genBinOp(o, inst.castTag(.sub).?, " * "), + //.mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), + //// TODO use a different strategy for div that communicates to the optimizer + //// that wrapping is UB. + //.div => try genBinOp(o, inst.castTag(.div).?, " / "), + + //.constant => unreachable, // excluded from function bodies + //.alloc => try genAlloc(o, inst.castTag(.alloc).?), + //.arg => genArg(o), + //.assembly => try genAsm(o, inst.castTag(.assembly).?), + //.block => try genBlock(o, inst.castTag(.block).?), + //.bitcast => try genBitcast(o, inst.castTag(.bitcast).?), + //.breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), + //.call => try genCall(o, inst.castTag(.call).?), + //.cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), + //.cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), + //.cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), + //.cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), + //.cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), + //.cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), + //.dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), + //.intcast => try genIntCast(o, inst.castTag(.intcast).?), + //.load => try genLoad(o, inst.castTag(.load).?), + //.ret => try genRet(o, inst.castTag(.ret).?), + //.retvoid => try genRetVoid(o), + //.store => try genStore(o, inst.castTag(.store).?), + //.unreach => try genUnreach(o, inst.castTag(.unreach).?), + //.loop => try genLoop(o, inst.castTag(.loop).?), + //.condbr => try genCondBr(o, inst.castTag(.condbr).?), + //.br => try genBr(o, inst.castTag(.br).?), + //.br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), + //.switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), + //// bool_and and bool_or are non-short-circuit operations + //.bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), + //.bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), + //.bit_and => try genBinOp(o, 
inst.castTag(.bit_and).?, " & "), + //.bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), + //.xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), + //.not => try genUnOp(o, inst.castTag(.not).?, "!"), + //.is_null => try genIsNull(o, inst.castTag(.is_null).?), + //.is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), + //.is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), + //.is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), + //.wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), + //.optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), + //.optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), + //.ref => try genRef(o, inst.castTag(.ref).?), + //.struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), + + //.is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), + //.is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), + //.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), + //.is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), + + //.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), + //.unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), + //.wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), + //.br_block_flat => return o.dg.fail("TODO: C backend: implement codegen for br_block_flat", .{}), + //.ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}), + //.varptr => try genVarPtr(o, inst.castTag(.varptr).?), + //.floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}), + else => return o.dg.fail("TODO: C backend: rework AIR memory layout", .{}), }; switch (result_value) { .none => {}, @@ -1060,7 +1070,7 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c } if (bits > 64) { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: genWrapOp for large integers", .{}); + return o.dg.fail("TODO: C backend: genWrapOp for large integers", .{}); } var min_buf: [80]u8 = undefined; @@ -1227,7 +1237,7 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue { try writer.writeAll(");\n"); return result_local; } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement function pointers", .{}); + return o.dg.fail("TODO: C backend: implement function pointers", .{}); } } @@ -1390,13 +1400,13 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { try o.writeCValue(writer, arg_c_value); try writer.writeAll(";\n"); } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO non-explicit inline asm regs", .{}); + return o.dg.fail("TODO non-explicit inline asm regs", .{}); } } const volatile_string: []const u8 = if (as.is_volatile) "volatile " else ""; try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source }); if (as.output_constraint) |_| { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: CBE inline asm output", .{}); + return o.dg.fail("TODO: CBE inline asm output", .{}); } if (as.inputs.len > 0) { if 
(as.output_constraint == null) { @@ -1421,7 +1431,7 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { if (as.base.isUnused()) return CValue.none; - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: inline asm expression result used", .{}); + return o.dg.fail("TODO: C backend: inline asm expression result used", .{}); } fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 0d05b97846..c93f04f618 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2519,6 +2519,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); defer dbg_info_buffer.deinit(); diff --git a/src/value.zig b/src/value.zig index 48cd6fffc4..0f7194d8c1 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1700,7 +1700,7 @@ pub const Value = extern union { /// peer type resolution. This is stored in a separate list so that /// the items are contiguous in memory and thus can be passed to /// `Module.resolvePeerTypes`. - stored_inst_list: std.ArrayListUnmanaged(*ir.Inst) = .{}, + stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Index) = .{}, }, }; -- cgit v1.2.3 From c020a302960c499ffe811dd0601a2d386c191b91 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Jul 2021 21:57:40 -0700 Subject: Sema: remove br_block_flat AIR instruction Thanks to the new AIR memory layout, we can do this by turning a br operand into a block, rather than having this special purpose instruction. --- BRANCH_TODO | 42 -------------- src/Air.zig | 4 -- src/Liveness.zig | 2 - src/Module.zig | 2 +- src/Sema.zig | 170 ++++++++++++++++++++++++++++++++++--------------------- 5 files changed, 105 insertions(+), 115 deletions(-) (limited to 'src/Liveness.zig') diff --git a/BRANCH_TODO b/BRANCH_TODO index aaba8b70b3..9055cda307 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -16,48 +16,6 @@ return inst.val; } - pub fn breakBlock(base: *Inst) ?*Block { - return switch (base.tag) { - .br => base.castTag(.br).?.block, - .br_void => base.castTag(.br_void).?.block, - .br_block_flat => base.castTag(.br_block_flat).?.block, - else => null, - }; - } - - pub const convertable_br_size = std.math.max(@sizeOf(BrBlockFlat), @sizeOf(Br)); - pub const convertable_br_align = std.math.max(@alignOf(BrBlockFlat), @alignOf(Br)); - comptime { - assert(@offsetOf(BrBlockFlat, "base") == @offsetOf(Br, "base")); - } - - pub const BrBlockFlat = struct { - pub const base_tag = Tag.br_block_flat; - - base: Inst, - block: *Block, - body: Body, - - pub fn operandCount(self: *const BrBlockFlat) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const BrBlockFlat, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; - - /// Same as `br` except the operand is a list of instructions to be treated as - /// a flat block; that is there is only 1 break instruction from the block, and - /// it is implied to be after the last instruction, and the last instruction is - /// the break operand. - /// This instruction exists for late-stage semantic analysis patch ups, to - /// replace one br operand with multiple instructions, without moving anything else around. - br_block_flat, - - /// For debugging purposes, prints a function representation to stderr. 
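The removal works because block bodies in the new AIR layout are plain trailing data in the `extra` array: an `Air.Block` payload stores only a `body_len`, and that many instruction indices follow it directly, as the `extraData` call sites below show. A minimal sketch of that decoding, using plain slices in place of the real `Air.extraData` machinery (illustrative only, not the compiler source):

const std = @import("std");

/// Decode a block body from a flat `extra` array. `payload_index` points at
/// the `body_len` field; the body's instruction indices follow immediately.
fn blockBody(extra: []const u32, payload_index: u32) []const u32 {
    const body_len = extra[payload_index];
    return extra[payload_index + 1 ..][0..body_len];
}

pub fn main() void {
    // extra = [ body_len = 2, instruction indices 7 and 9 ]
    const extra = [_]u32{ 2, 7, 9 };
    const body = blockBody(&extra, 0);
    std.debug.assert(body.len == 2 and body[0] == 7 and body[1] == 9);
}

This is the same idiom as `a.air.extra[extra.end..][0..extra.data.body_len]` in the Liveness hunk below.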
diff --git a/src/Air.zig b/src/Air.zig index e2eeae1130..60e6e9933d 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -308,10 +308,6 @@ pub const Inst = struct { operand: Ref, payload: u32, }, - constant: struct { - ty: Type, - val: Value, - }, dbg_stmt: struct { line: u32, column: u32, diff --git a/src/Liveness.zig b/src/Liveness.zig index 838f19d4a1..98af9eb429 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -299,8 +299,6 @@ fn analyzeInst( const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; try analyzeWithContext(a, new_set, body); - // We let this continue so that it can possibly mark the block as - // unreferenced below. return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }); }, .loop => { diff --git a/src/Module.zig b/src/Module.zig index 4bd48dad05..94d8b63744 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1185,7 +1185,7 @@ pub const Scope = struct { block_inst: Air.Inst.Index, /// Separate array list from break_inst_list so that it can be passed directly /// to resolvePeerTypes. - results: ArrayListUnmanaged(Air.Inst.Index), + results: ArrayListUnmanaged(Air.Inst.Ref), /// Keeps track of the break instructions so that the operand can be replaced /// if we need to add type coercion at the end of block analysis. /// Same indexes, capacity, length as `results`. diff --git a/src/Sema.zig b/src/Sema.zig index 48ad8d97fc..b4e8cd5af5 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -163,36 +163,36 @@ pub fn analyzeBody( const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .arg => try sema.zirArg(block, inst), - //.alloc => try sema.zirAlloc(block, inst), - //.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - //.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - //.alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), - //.alloc_mut => try sema.zirAllocMut(block, inst), - //.alloc_comptime => try sema.zirAllocComptime(block, inst), - //.anyframe_type => try sema.zirAnyframeType(block, inst), - //.array_cat => try sema.zirArrayCat(block, inst), - //.array_mul => try sema.zirArrayMul(block, inst), - //.array_type => try sema.zirArrayType(block, inst), - //.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), - //.vector_type => try sema.zirVectorType(block, inst), - //.as => try sema.zirAs(block, inst), - //.as_node => try sema.zirAsNode(block, inst), - //.bit_and => try sema.zirBitwise(block, inst, .bit_and), - //.bit_not => try sema.zirBitNot(block, inst), - //.bit_or => try sema.zirBitwise(block, inst, .bit_or), - //.bitcast => try sema.zirBitcast(block, inst), - //.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), - //.block => try sema.zirBlock(block, inst), - //.suspend_block => try sema.zirSuspendBlock(block, inst), - //.bool_not => try sema.zirBoolNot(block, inst), - //.bool_br_and => try sema.zirBoolBr(block, inst, false), - //.bool_br_or => try sema.zirBoolBr(block, inst, true), - //.c_import => try sema.zirCImport(block, inst), - //.call => try sema.zirCall(block, inst, .auto, false), - //.call_chkused => try sema.zirCall(block, inst, .auto, true), - //.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), - //.call_nosuspend => try sema.zirCall(block, inst, .no_async, false), - //.call_async => try sema.zirCall(block, inst, .async_kw, false), + .alloc => try sema.zirAlloc(block, inst), + 
.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), + .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), + .alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), + .alloc_mut => try sema.zirAllocMut(block, inst), + .alloc_comptime => try sema.zirAllocComptime(block, inst), + .anyframe_type => try sema.zirAnyframeType(block, inst), + .array_cat => try sema.zirArrayCat(block, inst), + .array_mul => try sema.zirArrayMul(block, inst), + .array_type => try sema.zirArrayType(block, inst), + .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), + .vector_type => try sema.zirVectorType(block, inst), + .as => try sema.zirAs(block, inst), + .as_node => try sema.zirAsNode(block, inst), + .bit_and => try sema.zirBitwise(block, inst, .bit_and), + .bit_not => try sema.zirBitNot(block, inst), + .bit_or => try sema.zirBitwise(block, inst, .bit_or), + .bitcast => try sema.zirBitcast(block, inst), + .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), + .block => try sema.zirBlock(block, inst), + .suspend_block => try sema.zirSuspendBlock(block, inst), + .bool_not => try sema.zirBoolNot(block, inst), + .bool_br_and => try sema.zirBoolBr(block, inst, false), + .bool_br_or => try sema.zirBoolBr(block, inst, true), + .c_import => try sema.zirCImport(block, inst), + .call => try sema.zirCall(block, inst, .auto, false), + .call_chkused => try sema.zirCall(block, inst, .auto, true), + .call_compile_time => try sema.zirCall(block, inst, .compile_time, false), + .call_nosuspend => try sema.zirCall(block, inst, .no_async, false), + .call_async => try sema.zirCall(block, inst, .async_kw, false), .cmp_eq => try sema.zirCmp(block, inst, .eq), .cmp_gt => try sema.zirCmp(block, inst, .gt), .cmp_gte => try sema.zirCmp(block, inst, .gte), @@ -1957,24 +1957,23 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(child_block.instructions.items[child_block.instructions.items.len - 1].ty.isNoReturn()); + assert(sema.getTypeOf(indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. - const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items); - try parent_block.instructions.appendSlice(gpa, copied_instructions); - return copied_instructions[copied_instructions.len - 1]; + try parent_block.instructions.appendSlice(gpa, child_block.instructions.items); + return indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); } if (merges.results.items.len == 1) { const last_inst_index = child_block.instructions.items.len - 1; const last_inst = child_block.instructions.items[last_inst_index]; - if (last_inst.breakBlock()) |br_block| { + if (sema.getBreakBlock(last_inst)) |br_block| { if (br_block == merges.block_inst) { // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. 
- const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]); - try parent_block.instructions.appendSlice(gpa, copied_instructions); + const without_break = child_block.instructions.items[0..last_inst_index]; + try parent_block.instructions.appendSlice(gpa, without_break); return merges.results.items[0]; } } @@ -1998,36 +1997,50 @@ fn analyzeBlockBody( // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. for (merges.br_list.items) |br| { - if (sema.getTypeOf(br.operand).eql(resolved_ty)) { + const br_operand = sema.air_instructions.items(.data)[br].br.operand; + const br_operand_src = src; + const br_operand_ty = sema.getTypeOf(br_operand); + if (br_operand_ty.eql(resolved_ty)) { // No type coercion needed. continue; } var coerce_block = parent_block.makeSubBlock(); defer coerce_block.instructions.deinit(gpa); - const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br.operand, br.operand.src); + const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src); // If no instructions were produced, such as in the case of a coercion of a // constant value to a new type, we can simply point the br operand to it. if (coerce_block.instructions.items.len == 0) { - br.operand = coerced_operand; + sema.air_instructions.items(.data)[br].br.operand = coerced_operand; continue; } - assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == coerced_operand); - // Here we depend on the br instruction having been over-allocated (if necessary) - // inside zirBreak so that it can be converted into a br_block_flat instruction. - const br_src = br.base.src; - const br_ty = br.base.ty; - const br_block_flat = @ptrCast(*Inst.BrBlockFlat, br); - br_block_flat.* = .{ - .base = .{ - .src = br_src, - .ty = br_ty, - .tag = .br_block_flat, - }, - .block = merges.block_inst, - .body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, coerce_block.instructions.items), - }, - }; + assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == + refToIndex(coerced_operand).?); + + // Convert the br operand to a block. 
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + coerce_block.instructions.items.len); + try sema.air_instructions.ensureUnusedCapacity(gpa, 2); + const sub_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const sub_br_inst = sub_block_inst + 1; + sema.air_instructions.items(.data)[br].br.operand = indexToRef(sub_block_inst); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .block, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(br_operand_ty), + .payload = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(u32, coerce_block.instructions.items.len), + }), + } }, + }); + sema.air_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); + sema.air_extra.appendAssumeCapacity(sub_br_inst); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = sub_block_inst, + .operand = coerced_operand, + } }, + }); } return indexToRef(merges.block_inst); } @@ -2257,10 +2270,11 @@ fn analyzeCall( ensure_result_used: bool, args: []const Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - if (func.ty.zigTypeTag() != .Fn) - return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); + const func_ty = sema.getTypeOf(func); + if (func_ty.zigTypeTag() != .Fn) + return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func_ty}); - const cc = func.ty.fnCallingConvention(); + const cc = func_ty.fnCallingConvention(); if (cc == .Naked) { // TODO add error note: declared here return sema.mod.fail( @@ -2270,8 +2284,8 @@ fn analyzeCall( .{}, ); } - const fn_params_len = func.ty.fnParamLen(); - if (func.ty.fnIsVarArgs()) { + const fn_params_len = func_ty.fnParamLen(); + if (func_ty.fnIsVarArgs()) { assert(cc == .C); if (args.len < fn_params_len) { // TODO add error note: declared here @@ -2310,11 +2324,9 @@ fn analyzeCall( const gpa = sema.gpa; - const ret_type = func.ty.fnReturnType(); - const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or - func.ty.fnCallingConvention() == .Inline; + func_ty.fnCallingConvention() == .Inline; const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { @@ -2400,7 +2412,19 @@ fn analyzeCall( break :res result; } else res: { try sema.requireRuntimeBlock(block, call_src); - break :res try block.addCall(call_src, ret_type, func, args); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len + + args.len); + const func_inst = try block.addInst(.{ + .tag = .call, + .data = .{ .pl_op = .{ + .operand = func, + .payload = sema.addExtraAssumeCapacity(Air.Call{ + .args_len = @intCast(u32, args.len), + }), + } }, + }); + sema.appendRefsAssumeCapacity(args); + break :res func_inst; }; if (ensure_result_used) { @@ -8140,3 +8164,17 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { } return result; } + +fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void { + const coerced = @bitCast([]const u32, refs); + sema.air_extra.appendSliceAssumeCapacity(coerced); +} + +fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index { + const air_datas = sema.air_instructions.items(.data); + const air_tags = sema.air_instructions.items(.tag); + switch (air_tags[inst_index]) { + .br => return air_datas[inst_index].br.block_inst, + else => return null, + } +} -- cgit v1.2.3 
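Before the next commit, note the allocation idiom the hunks above share: every site that creates an instruction appends to the `air_instructions` multi-array list and returns `indexToRef(len - 1)`, because low `Ref` values are reserved for the static typed-value table. A standalone sketch of that append-and-wrap pattern, with a made-up tag set and a stand-in constant for `Air.Inst.Ref.typed_value_map.len`:

const std = @import("std");
const Allocator = std.mem.Allocator;

const Inst = struct {
    tag: enum { constant, block, br },
    payload: u32,
};

// Stand-in for Air.Inst.Ref.typed_value_map.len: Refs below this value name
// interned typed values rather than instructions.
const ref_start_index: u32 = 10;

fn indexToRef(inst: u32) u32 {
    return ref_start_index + inst;
}

fn refToIndex(ref: u32) ?u32 {
    return if (ref >= ref_start_index) ref - ref_start_index else null;
}

/// Append one instruction and return its Ref, mirroring the
/// `air_instructions.append(gpa, ...)` + `indexToRef(len - 1)` calls in Sema.
fn addInst(gpa: *Allocator, list: *std.MultiArrayList(Inst), inst: Inst) !u32 {
    try list.append(gpa, inst);
    return indexToRef(@intCast(u32, list.len - 1));
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = &gpa_state.allocator;

    var list: std.MultiArrayList(Inst) = .{};
    defer list.deinit(gpa);

    const ref = try addInst(gpa, &list, .{ .tag = .constant, .payload = 0 });
    std.debug.assert(refToIndex(ref).? == 0);
}

Consumers then branch on whether a Ref falls below `ref_start_index` (an interned typed value) or at or above it (a real instruction index), which is exactly the `refToIndex` test in the diff.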
From 8082660118bba78de00e1e103e53730a87b2b70f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jul 2021 18:22:18 -0700 Subject: stage2: codegen.zig updated to new AIR memory layout --- src/Air.zig | 143 +++++- src/AstGen.zig | 77 ++-- src/Liveness.zig | 54 ++- src/Module.zig | 4 +- src/Sema.zig | 150 +------ src/Zir.zig | 6 +- src/codegen.zig | 1321 +++++++++++++++++++++++++++++------------------------- 7 files changed, 946 insertions(+), 809 deletions(-) (limited to 'src/Liveness.zig') diff --git a/src/Air.zig b/src/Air.zig index a8b38b7659..f4c4fa4155 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -13,9 +13,9 @@ const Air = @This(); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. /// The first few indexes are reserved. See `ExtraIndex` for the values. -extra: []u32, -values: []Value, -variables: []*Module.Var, +extra: []const u32, +values: []const Value, +variables: []const *Module.Var, pub const ExtraIndex = enum(u32) { /// Payload index of the main `Block` in the `extra` array. @@ -378,22 +378,109 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn getType(air: Air, inst: Air.Inst.Index) Type { - _ = air; - _ = inst; - @panic("TODO Air getType"); +pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[ref_int].ty; + } + return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len)); +} + +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { + const datas = air.instructions.items(.data); + switch (air.instructions.items(.tag)[inst]) { + .arg => return air.getRefType(datas[inst].ty_str.ty), + + .add, + .addwrap, + .sub, + .subwrap, + .mul, + .mulwrap, + .div, + .bit_and, + .bit_or, + .xor, + => return air.typeOf(datas[inst].bin_op.lhs), + + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .is_null, + .is_non_null, + .is_null_ptr, + .is_non_null_ptr, + .is_err, + .is_non_err, + .is_err_ptr, + .is_non_err_ptr, + .bool_and, + .bool_or, + => return Type.initTag(.bool), + + .const_ty => return Type.initTag(.type), + + .alloc => return datas[inst].ty, + + .assembly, + .block, + .constant, + .varptr, + .struct_field_ptr, + => return air.getRefType(datas[inst].ty_pl.ty), + + .not, + .bitcast, + .load, + .ref, + .floatcast, + .intcast, + .optional_payload, + .optional_payload_ptr, + .wrap_optional, + .unwrap_errunion_payload, + .unwrap_errunion_err, + .unwrap_errunion_payload_ptr, + .unwrap_errunion_err_ptr, + .wrap_errunion_payload, + .wrap_errunion_err, + => return air.getRefType(datas[inst].ty_op.ty), + + .loop, + .br, + .cond_br, + .switch_br, + .ret, + .unreach, + => return Type.initTag(.noreturn), + + .breakpoint, + .dbg_stmt, + .store, + => return Type.initTag(.void), + + .ptrtoint => return Type.initTag(.usize), + + .call => { + const callee_ty = air.typeOf(datas[inst].pl_op.operand); + return callee_ty.fnReturnType(); + }, + } } pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { - var i: usize = @enumToInt(ref); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + const ref_int = @enumToInt(ref); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[ref_int].val.toType(undefined) catch unreachable; } - i -= Air.Inst.Ref.typed_value_map.len; + const inst_index = 
ref_int - Air.Inst.Ref.typed_value_map.len; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); - assert(air_tags[i] == .const_ty); - return air_datas[i].ty; + assert(air_tags[inst_index] == .const_ty); + return air_datas[inst_index].ty; } /// Returns the requested data, as well as the new index which is at the start of the @@ -424,3 +511,33 @@ pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void { gpa.free(air.variables); air.* = undefined; } + +const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; + +pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { + return @intToEnum(Air.Inst.Ref, ref_start_index + inst); +} + +pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { + const ref_int = @enumToInt(inst); + if (ref_int >= ref_start_index) { + return ref_int - ref_start_index; + } else { + return null; + } +} + +/// Returns `null` if runtime-known. +pub fn value(air: Air, inst: Air.Inst.Ref) ?Value { + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[ref_int].val; + } + const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const air_datas = air.instructions.items(.data); + switch (air.instructions.items(.tag)[inst_index]) { + .constant => return air.values[air_datas[inst_index].ty_pl.payload], + .const_ty => unreachable, + else => return air.typeOfIndex(inst_index).onePossibleValue(), + } +} diff --git a/src/AstGen.zig b/src/AstGen.zig index 1b58b3f2f7..cbd918ecc7 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -6412,37 +6412,12 @@ fn multilineStringLiteral( node: ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - const start = node_datas[node].lhs; - const end = node_datas[node].rhs; - - const gpa = gz.astgen.gpa; - const string_bytes = &gz.astgen.string_bytes; - const str_index = string_bytes.items.len; - - // First line: do not append a newline. - var tok_i = start; - { - const slice = tree.tokenSlice(tok_i); - const line_bytes = slice[2 .. slice.len - 1]; - try string_bytes.appendSlice(gpa, line_bytes); - tok_i += 1; - } - // Following lines: each line prepends a newline. - while (tok_i <= end) : (tok_i += 1) { - const slice = tree.tokenSlice(tok_i); - const line_bytes = slice[2 .. 
slice.len - 1]; - try string_bytes.ensureCapacity(gpa, string_bytes.items.len + line_bytes.len + 1); - string_bytes.appendAssumeCapacity('\n'); - string_bytes.appendSliceAssumeCapacity(line_bytes); - } + const str = try astgen.strLitNodeAsString(node); const result = try gz.add(.{ .tag = .str, .data = .{ .str = .{ - .start = @intCast(u32, str_index), - .len = @intCast(u32, string_bytes.items.len - str_index), + .start = str.index, + .len = str.len, } }, }); return rvalue(gz, rl, result, node); @@ -6620,9 +6595,14 @@ fn asmExpr( const tree = astgen.tree; const main_tokens = tree.nodes.items(.main_token); const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); const token_tags = tree.tokens.items(.tag); - const asm_source = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, full.ast.template); + const asm_source = switch (node_tags[full.ast.template]) { + .string_literal => try astgen.strLitAsString(main_tokens[full.ast.template]), + .multiline_string_literal => try astgen.strLitNodeAsString(full.ast.template), + else => return astgen.failNode(node, "assembly code must use string literal syntax", .{}), + }; // See https://github.com/ziglang/zig/issues/215 and related issues discussing // possible inline assembly improvements. Until then here is status quo AstGen @@ -6752,7 +6732,7 @@ fn asmExpr( const result = try gz.addAsm(.{ .node = node, - .asm_source = asm_source, + .asm_source = asm_source.index, .is_volatile = full.volatile_token != null, .output_type_bits = output_type_bits, .outputs = outputs, @@ -8579,6 +8559,41 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !IndexSlice { } } +fn strLitNodeAsString(astgen: *AstGen, node: ast.Node.Index) !IndexSlice { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const start = node_datas[node].lhs; + const end = node_datas[node].rhs; + + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index = string_bytes.items.len; + + // First line: do not append a newline. + var tok_i = start; + { + const slice = tree.tokenSlice(tok_i); + const line_bytes = slice[2 .. slice.len - 1]; + try string_bytes.appendSlice(gpa, line_bytes); + tok_i += 1; + } + // Following lines: each line prepends a newline. + while (tok_i <= end) : (tok_i += 1) { + const slice = tree.tokenSlice(tok_i); + const line_bytes = slice[2 .. slice.len - 1]; + try string_bytes.ensureCapacity(gpa, string_bytes.items.len + line_bytes.len + 1); + string_bytes.appendAssumeCapacity('\n'); + string_bytes.appendSliceAssumeCapacity(line_bytes); + } + const len = string_bytes.items.len - str_index; + try string_bytes.append(gpa, 0); + return IndexSlice{ + .index = @intCast(u32, str_index), + .len = @intCast(u32, len), + }; +} + fn testNameString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !u32 { const gpa = astgen.gpa; const string_bytes = &astgen.string_bytes; @@ -9440,7 +9455,7 @@ const GenZir = struct { args: struct { /// Absolute node index. This function does the conversion to offset from Decl. node: ast.Node.Index, - asm_source: Zir.Inst.Ref, + asm_source: u32, output_type_bits: u32, is_volatile: bool, outputs: []const Zir.Inst.Asm.Output, diff --git a/src/Liveness.zig b/src/Liveness.zig index 98af9eb429..79fc0d7325 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -21,7 +21,7 @@ const Log2Int = std.math.Log2Int; /// operand dies after this instruction. /// Instructions which need more data to track liveness have special handling via the /// `special` table. 
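
A minimal sketch, assuming a liveness: Liveness value, an instruction index
inst, and an operand slot op (0..bpi-2), of the bit indexing that
operandDies and isUnused perform on this packed layout:

    // Each instruction owns a 4-bit group: the low bits are operand deaths,
    // the top bit is the "unreferenced" flag. (liveness, inst, and op are
    // assumed values, not from this patch.)
    const usize_index = (inst * Liveness.bpi) / @bitSizeOf(usize);
    const mask = @as(usize, 1) <<
        @intCast(std.math.Log2Int(usize), (inst % (@bitSizeOf(usize) / Liveness.bpi)) * Liveness.bpi + op);
    const op_dies = (liveness.tomb_bits[usize_index] & mask) != 0;
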
-tomb_bits: []const usize, +tomb_bits: []usize, /// Sparse table of specially handled instructions. The value is an index into the `extra` /// array. The meaning of the data depends on the AIR tag. special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), @@ -98,7 +98,7 @@ pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool return (l.tomb_bits[usize_index] & mask) != 0; } -pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { +pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) void { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << @@ -106,16 +106,40 @@ pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt l.tomb_bits[usize_index] |= mask; } +/// Higher level API. +pub const CondBrSlices = struct { + then_deaths: []const Air.Inst.Index, + else_deaths: []const Air.Inst.Index, +}; + +pub fn getCondBr(l: Liveness, inst: Air.Inst.Index) CondBrSlices { + var index: usize = l.special.get(inst) orelse return .{ + .then_deaths = &.{}, + .else_deaths = &.{}, + }; + const then_death_count = l.extra[index]; + index += 1; + const else_death_count = l.extra[index]; + index += 1; + const then_deaths = l.extra[index..][0..then_death_count]; + index += then_death_count; + return .{ + .then_deaths = then_deaths, + .else_deaths = l.extra[index..][0..else_death_count], + }; +} + pub fn deinit(l: *Liveness, gpa: *Allocator) void { gpa.free(l.tomb_bits); gpa.free(l.extra); l.special.deinit(gpa); + l.* = undefined; } /// How many tomb bits per AIR instruction. -const bpi = 4; -const Bpi = std.meta.Int(.unsigned, bpi); -const OperandInt = std.math.Log2Int(Bpi); +pub const bpi = 4; +pub const Bpi = std.meta.Int(.unsigned, bpi); +pub const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. 
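
The new getCondBr accessor spares backends from decoding extra by hand, and
returns empty slices when the instruction has no special entry. A
hypothetical consumer, assuming a liveness: Liveness value and the codegen
processDeath helper:

    // Process the deaths recorded for each branch of a cond_br before
    // generating that branch's body. (liveness, inst, and self are assumed
    // values for illustration.)
    const deaths = liveness.getCondBr(inst);
    for (deaths.then_deaths) |death| self.processDeath(death);
    // ... emit the then branch ...
    for (deaths.else_deaths) |death| self.processDeath(death);
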
const Analysis = struct { @@ -267,14 +291,14 @@ fn analyzeInst( const inst_data = inst_datas[inst].pl_op; const callee = inst_data.operand; const extra = a.air.extraData(Air.Call, inst_data.payload); - const args = a.air.extra[extra.end..][0..extra.data.args_len]; + const args = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]); if (args.len <= bpi - 2) { - var buf: [bpi - 1]Air.Inst.Ref = undefined; + var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); buf[0] = callee; - std.mem.copy(Air.Inst.Ref, buf[1..], @bitCast([]const Air.Inst.Ref, args)); + std.mem.copy(Air.Inst.Ref, buf[1..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function with greater than 2 args"); + @panic("TODO: liveness analysis for function call with greater than 2 args"); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; @@ -285,12 +309,12 @@ fn analyzeInst( const extended = a.zir.instructions.items(.data)[extra.data.zir_index].extended; const outputs_len = @truncate(u5, extended.small); const inputs_len = @truncate(u5, extended.small >> 5); - const outputs = a.air.extra[extra.end..][0..outputs_len]; - const inputs = a.air.extra[extra.end + outputs.len ..][0..inputs_len]; - if (outputs.len + inputs.len <= bpi - 1) { - var buf: [bpi - 1]Air.Inst.Ref = undefined; - std.mem.copy(Air.Inst.Ref, &buf, @bitCast([]const Air.Inst.Ref, outputs)); - std.mem.copy(Air.Inst.Ref, buf[outputs.len..], @bitCast([]const Air.Inst.Ref, inputs)); + const outputs = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..outputs_len]); + const args = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end + outputs.len ..][0..inputs_len]); + if (outputs.len + args.len <= bpi - 1) { + var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); + std.mem.copy(Air.Inst.Ref, &buf, outputs); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } @panic("TODO: liveness analysis for asm with greater than 3 args"); diff --git a/src/Module.zig b/src/Module.zig index f452824d33..c101221f2e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1309,7 +1309,7 @@ pub const Scope = struct { const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); sema.air_instructions.appendAssumeCapacity(inst); block.instructions.appendAssumeCapacity(result_index); - return Sema.indexToRef(result_index); + return Air.indexToRef(result_index); } }; }; @@ -3533,7 +3533,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { const ty_ref = try sema.addType(param_type); const arg_index = @intCast(u32, sema.air_instructions.len); inner_block.instructions.appendAssumeCapacity(arg_index); - param_inst.* = Sema.indexToRef(arg_index); + param_inst.* = Air.indexToRef(arg_index); try sema.air_instructions.append(gpa, .{ .tag = .arg, .data = .{ diff --git a/src/Sema.zig b/src/Sema.zig index a144ce1d50..777619dc48 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1301,7 +1301,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!A // Set the name of the Air.Arg instruction for use by codegen debug info. 
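
The operand buffers above switched from undefined to .none initialization,
presumably because all bpi - 1 slots are handed to trackOperands whether or
not they are filled. A sketch of the pattern, with names as in the call case
above:

    // Unfilled slots must hold .none so they read as "no operand" rather
    // than undefined memory when the tomb bits are computed.
    var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
    buf[0] = callee;
    std.mem.copy(Air.Inst.Ref, buf[1..], args); // args.len <= bpi - 2
    return trackOperands(a, new_set, inst, main_tomb, buf);
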
const air_arg = sema.param_inst_list[arg_index]; - sema.air_instructions.items(.data)[refToIndex(air_arg).?].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[Air.refToIndex(air_arg).?].ty_str.str = inst_data.start; return air_arg; } @@ -1389,7 +1389,7 @@ fn zirAllocInferred( // to the block even though it is currently a `.constant`. const result = try sema.addConstant(inferred_alloc_ty, Value.initPayload(&val_payload.base)); try sema.requireFunctionBlock(block, src); - try block.instructions.append(sema.gpa, refToIndex(result).?); + try block.instructions.append(sema.gpa, Air.refToIndex(result).?); return result; } @@ -1400,7 +1400,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = sema.resolveInst(inst_data.operand); - const ptr_inst = refToIndex(ptr).?; + const ptr_inst = Air.refToIndex(ptr).?; assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; @@ -1586,7 +1586,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const bin_inst = sema.code.instructions.items(.data)[inst].bin; const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); - const ptr_inst = refToIndex(ptr).?; + const ptr_inst = Air.refToIndex(ptr).?; assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; @@ -1968,13 +1968,13 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(sema.typeOf(indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); + assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. try parent_block.instructions.appendSlice(gpa, child_block.instructions.items); - return indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); + return Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); } if (merges.results.items.len == 1) { const last_inst_index = child_block.instructions.items.len - 1; @@ -2025,7 +2025,7 @@ fn analyzeBlockBody( continue; } assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == - refToIndex(coerced_operand).?); + Air.refToIndex(coerced_operand).?); // Convert the br operand to a block. 
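
With indexToRef/refToIndex now living in Air.zig, Sema's recurring pattern is
to map a freshly produced Ref back to its instruction index. A minimal
sketch, assuming a ty: Type and val: Value:

    // addConstant returns a Ref; refToIndex recovers the instruction index
    // so the instruction's tag and data can be inspected directly.
    const result = try sema.addConstant(ty, val);
    const result_inst = Air.refToIndex(result).?; // constants are real instructions
    assert(sema.air_instructions.items(.tag)[result_inst] == .constant);
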
const br_operand_ty_ref = try sema.addType(br_operand_ty); @@ -2034,7 +2034,7 @@ fn analyzeBlockBody( try sema.air_instructions.ensureUnusedCapacity(gpa, 2); const sub_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); const sub_br_inst = sub_block_inst + 1; - sema.air_instructions.items(.data)[br].br.operand = indexToRef(sub_block_inst); + sema.air_instructions.items(.data)[br].br.operand = Air.indexToRef(sub_block_inst); sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ @@ -2054,7 +2054,7 @@ fn analyzeBlockBody( } }, }); } - return indexToRef(merges.block_inst); + return Air.indexToRef(merges.block_inst); } fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { @@ -2149,7 +2149,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) Compil if (label.zir_block == zir_block) { const br_ref = try start_block.addBr(label.merges.block_inst, operand); try label.merges.results.append(sema.gpa, operand); - try label.merges.br_list.append(sema.gpa, refToIndex(br_ref).?); + try label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?); return inst; } } @@ -5310,7 +5310,7 @@ fn zirBoolBr( } } }); try parent_block.instructions.append(gpa, block_inst); - return indexToRef(block_inst); + return Air.indexToRef(block_inst); } fn zirIsNonNull( @@ -7204,7 +7204,7 @@ fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedVal } }, }); try block.instructions.append(gpa, result_inst); - return indexToRef(result_inst); + return Air.indexToRef(result_inst); } fn analyzeRef( @@ -8021,107 +8021,18 @@ fn enumFieldSrcLoc( } else unreachable; } -/// This is only meant to be called by `typeOf`. -fn analyzeAsTypeInfallible(sema: *Sema, inst: Air.Inst.Ref) Type { - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; - } - i -= Air.Inst.Ref.typed_value_map.len; - assert(sema.air_instructions.items(.tag)[i] == .const_ty); - return sema.air_instructions.items(.data)[i].ty; -} - /// Returns the type of the AIR instruction. 
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].ty; - } - i -= Air.Inst.Ref.typed_value_map.len; + return sema.getTmpAir().typeOf(inst); +} - const air_datas = sema.air_instructions.items(.data); - switch (sema.air_instructions.items(.tag)[i]) { - .arg => return sema.analyzeAsTypeInfallible(air_datas[i].ty_str.ty), - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .bit_and, - .bit_or, - .xor, - => return sema.typeOf(air_datas[i].bin_op.lhs), - - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .is_null, - .is_non_null, - .is_null_ptr, - .is_non_null_ptr, - .is_err, - .is_non_err, - .is_err_ptr, - .is_non_err_ptr, - .bool_and, - .bool_or, - => return Type.initTag(.bool), - - .const_ty => return Type.initTag(.type), - - .alloc => return air_datas[i].ty, - - .assembly, - .block, - .constant, - .varptr, - .struct_field_ptr, - => return sema.analyzeAsTypeInfallible(air_datas[i].ty_pl.ty), - - .not, - .bitcast, - .load, - .ref, - .floatcast, - .intcast, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - .unwrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, - .wrap_errunion_payload, - .wrap_errunion_err, - => return sema.analyzeAsTypeInfallible(air_datas[i].ty_op.ty), - - .loop, - .br, - .cond_br, - .switch_br, - .ret, - .unreach, - => return Type.initTag(.noreturn), - - .breakpoint, - .dbg_stmt, - .store, - => return Type.initTag(.void), - - .ptrtoint => return Type.initTag(.usize), - - .call => { - const callee_ty = sema.typeOf(air_datas[i].pl_op.operand); - return callee_ty.fnReturnType(); - }, - } +fn getTmpAir(sema: Sema) Air { + return .{ + .instructions = sema.air_instructions.slice(), + .extra = sema.air_extra.items, + .values = sema.air_values.items, + .variables = sema.air_variables.items, + }; } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { @@ -8185,7 +8096,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .tag = .const_ty, .data = .{ .ty = ty }, }); - return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -8207,22 +8118,7 @@ fn addConstant(sema: *Sema, ty: Type, val: Value) CompileError!Air.Inst.Ref { .payload = @intCast(u32, sema.air_values.items.len - 1), } }, }); - return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); -} - -const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; - -pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { - return @intToEnum(Air.Inst.Ref, ref_start_index + inst); -} - -pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { - const ref_int = @enumToInt(inst); - if (ref_int >= ref_start_index) { - return ref_int - ref_start_index; - } else { - return null; - } + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { diff --git a/src/Zir.zig b/src/Zir.zig index 42924817fc..cf349a6a8d 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2176,7 +2176,8 @@ pub const Inst = struct { /// 2. clobber: u32 // index into string_bytes (null terminated) for every clobbers_len. pub const Asm = struct { src_node: i32, - asm_source: Ref, + // null-terminated string index + asm_source: u32, /// 1 bit for each outputs_len: whether it uses `-> T` or not. 
/// 0b0 - operand is a pointer to where to store the output. /// 0b1 - operand is a type; asm expression has the output as the result. @@ -3383,9 +3384,10 @@ const Writer = struct { const inputs_len = @truncate(u5, extended.small >> 5); const clobbers_len = @truncate(u5, extended.small >> 10); const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const asm_source = self.code.nullTerminatedString(extra.data.asm_source); try self.writeFlag(stream, "volatile, ", is_volatile); - try self.writeInstRef(stream, extra.data.asm_source); + try stream.print("\"{}\", ", .{std.zig.fmtEscapes(asm_source)}); try stream.writeAll(", "); var extra_i: usize = extra.end; diff --git a/src/codegen.zig b/src/codegen.zig index 1495b19673..bc22d7ec19 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -3,6 +3,7 @@ const mem = std.mem; const math = std.math; const assert = std.debug.assert; const Air = @import("Air.zig"); +const Zir = @import("Zir.zig"); const Liveness = @import("Liveness.zig"); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; @@ -337,6 +338,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// to place a new stack allocation, it goes here, and then bumps `max_end_stack`. next_stack_offset: u32 = 0, + /// Debug field, used to find bugs in the compiler. + air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init, + + const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {}; + const MCValue = union(enum) { /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc. /// TODO Look into deleting this tag and using `dead` instead, since every use @@ -751,24 +757,91 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - for (body) |inst| { - const tomb_bits = self.liveness.getTombBits(inst); - try self.ensureProcessDeathCapacity(@popCount(@TypeOf(tomb_bits), tomb_bits)); + const air_tags = self.air.instructions.items(.tag); - const mcv = try self.genFuncInst(inst); - if (!self.liveness.isUnused(inst)) { - log.debug("{} => {}", .{ inst, mcv }); - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - try branch.inst_table.putNoClobber(self.gpa, inst, mcv); + for (body) |inst| { + const old_air_bookkeeping = self.air_bookkeeping; + try self.ensureProcessDeathCapacity(Liveness.bpi); + + switch (air_tags[inst]) { + // zig fmt: off + .add => try self.airAdd(inst), + .addwrap => try self.airAddWrap(inst), + .sub => try self.airSub(inst), + .subwrap => try self.airSubWrap(inst), + .mul => try self.airMul(inst), + .mulwrap => try self.airMulWrap(inst), + .div => try self.airDiv(inst), + + .cmp_lt => try self.airCmp(inst, .lt), + .cmp_lte => try self.airCmp(inst, .lte), + .cmp_eq => try self.airCmp(inst, .eq), + .cmp_gte => try self.airCmp(inst, .gte), + .cmp_gt => try self.airCmp(inst, .gt), + .cmp_neq => try self.airCmp(inst, .neq), + + .bool_and => try self.airBoolOp(inst), + .bool_or => try self.airBoolOp(inst), + .bit_and => try self.airBitAnd(inst), + .bit_or => try self.airBitOr(inst), + .xor => try self.airXor(inst), + + .alloc => try self.airAlloc(inst), + .arg => try self.airArg(inst), + .assembly => try self.airAsm(inst), + .bitcast => try self.airBitCast(inst), + .block => try self.airBlock(inst), + .br => try self.airBr(inst), + .breakpoint => try self.airBreakpoint(), + .call => try self.airCall(inst), + .cond_br => try self.airCondBr(inst), + .dbg_stmt => try self.airDbgStmt(inst), + .floatcast 
=> try self.airFloatCast(inst), + .intcast => try self.airIntCast(inst), + .is_non_null => try self.airIsNonNull(inst), + .is_non_null_ptr => try self.airIsNonNullPtr(inst), + .is_null => try self.airIsNull(inst), + .is_null_ptr => try self.airIsNullPtr(inst), + .is_non_err => try self.airIsNonErr(inst), + .is_non_err_ptr => try self.airIsNonErrPtr(inst), + .is_err => try self.airIsErr(inst), + .is_err_ptr => try self.airIsErrPtr(inst), + .load => try self.airLoad(inst), + .loop => try self.airLoop(inst), + .not => try self.airNot(inst), + .ptrtoint => try self.airPtrToInt(inst), + .ref => try self.airRef(inst), + .ret => try self.airRet(inst), + .store => try self.airStore(inst), + .struct_field_ptr=> try self.airStructFieldPtr(inst), + .switch_br => try self.airSwitch(inst), + .varptr => try self.airVarPtr(inst), + + .constant => unreachable, // excluded from function bodies + .const_ty => unreachable, // excluded from function bodies + .unreach => self.finishAirBookkeeping(), + + .optional_payload => try self.airOptionalPayload(inst), + .optional_payload_ptr => try self.airOptionalPayloadPtr(inst), + .unwrap_errunion_err => try self.airUnwrapErrErr(inst), + .unwrap_errunion_payload => try self.airUnwrapErrPayload(inst), + .unwrap_errunion_err_ptr => try self.airUnwrapErrErrPtr(inst), + .unwrap_errunion_payload_ptr=> try self.airUnwrapErrPayloadPtr(inst), + + .wrap_optional => try self.airWrapOptional(inst), + .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst), + .wrap_errunion_err => try self.airWrapErrUnionErr(inst), + // zig fmt: on + } + if (std.debug.runtime_safety) { + if (self.air_bookkeeping != old_air_bookkeeping + 1) { + std.debug.panic( + \\in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. + \\Look for a missing call to finishAir or an extra call to it. + \\ + , .{ inst, air_tags[inst] }); + } } - - // TODO inline this logic into every instruction - @panic("TODO rework AIR memory layout codegen for processing deaths"); - //var i: ir.Inst.DeathsBitIndex = 0; - //while (inst.getOperand(i)) |operand| : (i += 1) { - // if (inst.operandDies(i)) - // self.processDeath(operand); - //} } } @@ -833,9 +906,36 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + /// Called when there are no operands, and the instruction is always unreferenced. 
+ fn finishAirBookkeeping(self: *Self) void { + if (std.debug.runtime_safety) { + self.air_bookkeeping += 1; + } + } + + fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { + var tomb_bits = self.liveness.getTombBits(inst); + for (operands) |op| { + const dies = @truncate(u1, tomb_bits) != 0; + tomb_bits >>= 1; + if (!dies) continue; + const op_int = @enumToInt(op); + if (op_int < Air.Inst.Ref.typed_value_map.len) continue; + const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); + self.processDeath(operand); + } + const is_used = @truncate(u1, tomb_bits) == 0; + if (is_used) { + log.debug("{} => {}", .{ inst, result }); + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacityNoClobber(inst, result); + } + self.finishAirBookkeeping(); + } + fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; - try table.ensureCapacity(self.gpa, table.count() + additional_count); + try table.ensureUnusedCapacity(self.gpa, additional_count); } /// Adds a Type to the .debug_info at the current position. The bytes will be populated later, @@ -860,83 +960,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genFuncInst(self: *Self, inst: Air.Inst.Index) !MCValue { - const air_tags = self.air.instructions.items(.tag); - switch (air_tags[inst]) { - // zig fmt: off - //.add => return self.genAdd(inst.castTag(.add).?), - //.addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - //.sub => return self.genSub(inst.castTag(.sub).?), - //.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - //.mul => return self.genMul(inst.castTag(.mul).?), - //.mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - //.div => return self.genDiv(inst.castTag(.div).?), - - //.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), - //.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - //.cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), - //.cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - //.cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), - //.cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), - - //.bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - //.bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - //.bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - //.bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - //.xor => return self.genXor(inst.castTag(.xor).?), - - //.alloc => return self.genAlloc(inst.castTag(.alloc).?), - //.arg => return self.genArg(inst.castTag(.arg).?), - //.assembly => return self.genAsm(inst.castTag(.assembly).?), - //.bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - //.block => return self.genBlock(inst.castTag(.block).?), - //.br => return self.genBr(inst.castTag(.br).?), - //.br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - //.breakpoint => return self.genBreakpoint(inst.src), - //.call => return self.genCall(inst.castTag(.call).?), - //.cond_br => return self.genCondBr(inst.castTag(.condbr).?), - //.dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - //.floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - //.intcast => return self.genIntCast(inst.castTag(.intcast).?), - //.is_non_null => return 
self.genIsNonNull(inst.castTag(.is_non_null).?), - //.is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - //.is_null => return self.genIsNull(inst.castTag(.is_null).?), - //.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - //.is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - //.is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - //.is_err => return self.genIsErr(inst.castTag(.is_err).?), - //.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - //.load => return self.genLoad(inst.castTag(.load).?), - //.loop => return self.genLoop(inst.castTag(.loop).?), - //.not => return self.genNot(inst.castTag(.not).?), - //.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - //.ref => return self.genRef(inst.castTag(.ref).?), - //.ret => return self.genRet(inst.castTag(.ret).?), - //.store => return self.genStore(inst.castTag(.store).?), - //.struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - //.switch_br => return self.genSwitch(inst.castTag(.switchbr).?), - //.varptr => return self.genVarPtr(inst.castTag(.varptr).?), - - //.constant => unreachable, // excluded from function bodies - //.unreach => return MCValue{ .unreach = {} }, - - //.optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - //.optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - //.unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - //.unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - //.unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - //.unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - - //.wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), - //.wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - //.wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), - - // zig fmt: on - - else => @panic("TODO finish air memory layout branch, more codegen.zig instructions"), - } - } - fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 { if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -954,7 +977,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Use a pointer instruction as the basis for allocating stack memory. 
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.getType(inst).elemType(); + const elem_ty = self.air.typeOfIndex(inst).elemType(); const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; @@ -964,7 +987,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = inst.ty; + const elem_ty = self.air.typeOfIndex(inst); const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; @@ -993,7 +1016,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { assert(reg == toCanonicalReg(reg_mcv.register)); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(inst.ty, stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Copies a value to a register without tracking the register. The register is not considered @@ -1010,281 +1033,274 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const reg = try self.register_manager.allocReg(reg_owner, &.{}); - try self.genSetReg(reg_owner.ty, reg, mcv); + try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } - fn genAlloc(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMemPtr(inst); - return MCValue{ .ptr_stack_offset = stack_offset }; + return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); } - fn genFloatCast(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airFloatCast(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genIntCast(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
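
The airIntCast rewrite just below shows the subtle part of the new
bookkeeping: even when the result is unused, the early return still reports
the operand to finishAir so its death is processed. A sketch of that early
return, with ty_op as in the handler below:

    // Returning a bare MCValue.dead (as the old genIntCast did) would skip
    // operand death processing; finishAir must still see ty_op.operand.
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
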
+ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) - return MCValue.dead; + return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.getType(ty_op.operand); + const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.getType(inst).intInfo(self.target.*); + const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); if (info_a.bits == info_b.bits) - return operand; + return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none }); - switch (arch) { + const result: MCValue = switch (arch) { else => return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genNot(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand = try self.resolveInst(ty_op.operand); - switch (operand) { - .dead => unreachable, - .unreach => unreachable, - .compare_flags_unsigned => |op| return MCValue{ - .compare_flags_unsigned = switch (op) { - .gte => .lt, - .gt => .lte, - .neq => .eq, - .lt => .gte, - .lte => .gt, - .eq => .neq, + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(ty_op.operand); + switch (operand) { + .dead => unreachable, + .unreach => unreachable, + .compare_flags_unsigned => |op| { + const r = MCValue{ + .compare_flags_unsigned = switch (op) { + .gte => .lt, + .gt => .lte, + .neq => .eq, + .lt => .gte, + .lte => .gt, + .eq => .neq, + }, + }; + break :result r; }, - }, - .compare_flags_signed => |op| return MCValue{ - .compare_flags_signed = switch (op) { - .gte => .lt, - .gt => .lte, - .neq => .eq, - .lt => .gte, - .lte => .gt, - .eq => .neq, + .compare_flags_signed => |op| { + const r = MCValue{ + .compare_flags_signed = switch (op) { + .gte => .lt, + .gt => .lte, + .neq => .eq, + .lt => .gte, + .lte => .gt, + .eq => .neq, + }, + }; + break :result r; }, - }, - else => {}, - } + else => {}, + } - switch (arch) { - .x86_64 => { - return try self.genX8664BinMath(inst, ty_op.operand, .bool_true); - }, - .arm, .armeb => { - return try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not); - }, - else => return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}), - } + switch (arch) { + .x86_64 => { + break :result try self.genX8664BinMath(inst, ty_op.operand, .bool_true); + }, + .arm, .armeb => { + break :result try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not); + }, + else => return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genAdd(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airAdd(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .x86_64 => { - return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs); - }, - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add), else => return self.fail("TODO implement add for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genAddWrap(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genMul(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airSub(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .x86_64 => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), - .arm, .armeb => return try self.genArmMul(inst, bin_op.lhs, bin_op.rhs), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub), + else => return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + + fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + + fn airMul(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => try self.genArmMul(inst, bin_op.lhs, bin_op.rhs), else => return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genMulWrap(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genDiv(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement div for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genBitAnd(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and), else => return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genBitOr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or), else => return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genXor(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airXor(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor), else => return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genOptionalPayload(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union error for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } + // *(E!T) -> E - fn genUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } + // *(E!T) -> *T - fn genUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genWrapOptional(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - const optional_ty = self.air.getType(inst); + fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const optional_ty = self.air.typeOfIndex(inst); - // Optional type is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) - return MCValue{ .immediate = 1 }; + // Optional with a zero-bit payload type is just a boolean true + if (optional_ty.abiSize(self.target.*) == 1) + break :result MCValue{ .immediate = 1 }; - switch (arch) { - else => return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}), - } + switch (arch) { + else => return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// T to E!T - fn genWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - - switch (arch) { + fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// E to E!T - fn genWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - - switch (arch) { + fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement wrap errunion error for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genVarPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airVarPtr(self: *Self, inst: Air.Inst.Index) !void { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement varptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ .none, .none, .none }); } - fn reuseOperand(self: *Self, inst: Air.Inst.Index, op_index: u2, mcv: MCValue) bool { + fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { if (!self.liveness.operandDies(inst, op_index)) return false; @@ -1310,12 +1326,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // That makes us responsible for doing the rest of the stuff that processDeath would have done. const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - branch.inst_table.putAssumeCapacity(inst.getOperand(op_index).?, .dead); + branch.inst_table.putAssumeCapacity(Air.refToIndex(operand).?, .dead); return true; } - fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue) !void { + fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) !void { + const elem_ty = ptr_ty.elemType(); switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1343,31 +1360,37 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genLoad(self: *Self, inst: Air.Inst.Index) !MCValue { - const elem_ty = self.air.getType(inst); - if (!elem_ty.hasCodeGenBits()) - return MCValue.none; - const ptr = try self.resolveInst(inst.operand); - const is_volatile = inst.operand.ty.isVolatilePtr(); - if (self.liveness.isUnused(inst) and !is_volatile) - return MCValue.dead; - const dst_mcv: MCValue = blk: { - if (self.reuseOperand(inst, 0, ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const elem_ty = self.air.typeOfIndex(inst); + const result: MCValue = result: { + if (!elem_ty.hasCodeGenBits()) + break :result MCValue.none; + + const ptr = try self.resolveInst(ty_op.operand); + const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + if (self.liveness.isUnused(inst) and !is_volatile) + break :result MCValue.dead; + + const dst_mcv: MCValue = blk: { + if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
+ break :blk ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + break :result dst_mcv; }; - self.load(dst_mcv, ptr); - return dst_mcv; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genStore(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airStore(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const elem_ty = self.getType(bin_op.rhs); + const elem_ty = self.air.typeOf(bin_op.rhs); switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1397,36 +1420,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("TODO implement storing to MCValue.stack_offset", .{}); }, } - return .none; + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genStructFieldPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - const struct_field_ptr = self.air.instructions.items(.data)[inst].struct_field_ptr; - _ = struct_field_ptr; + fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; + _ = extra; return self.fail("TODO implement codegen struct_field_ptr", .{}); - } - - fn genSub(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .x86_64 => return self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), - .arm, .armeb => return self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub), - else => return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}), - } - } - - fn genSubWrap(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { - else => return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}), - } + //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none }); } fn armOperandShouldBeRegister(self: *Self, mcv: MCValue) !bool { @@ -1461,8 +1463,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const rhs_is_register = rhs == .register; const lhs_should_be_register = try self.armOperandShouldBeRegister(lhs); const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs); - const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs); - const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs); + const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs); + const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs); // Destination must be a register var dst_mcv: MCValue = undefined; @@ -1476,14 +1478,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate 0 or 1 registers if (!rhs_is_register and rhs_should_be_register) { rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register and lhs_should_be_register) { lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; - branch.inst_table.putAssumeCapacity(op_lhs, lhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1508,7 +1510,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } } else if (lhs_should_be_register) { // RHS is immediate @@ -1605,14 +1607,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Index, op_rhs: Air.Inst.Index) !MCValue { + fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue { const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); const lhs_is_register = lhs == .register; const rhs_is_register = rhs == .register; - const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs); - const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs); + const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs); + const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs); // Destination must be a register // LHS must be a register @@ -1627,14 +1629,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate 0 or 1 registers if (!rhs_is_register) { rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register) { lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; - 
branch.inst_table.putAssumeCapacity(op_lhs, lhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; } else { @@ -1656,7 +1658,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } } @@ -1698,8 +1700,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // as the result MCValue. var dst_mcv: MCValue = undefined; var src_mcv: MCValue = undefined; - var src_inst: Air.Inst.Index = undefined; - if (self.reuseOperand(inst, 0, lhs)) { + var src_inst: Air.Inst.Ref = undefined; + if (self.reuseOperand(inst, op_lhs, 0, lhs)) { // LHS dies; use it as the destination. // Both operands cannot be memory. src_inst = op_rhs; @@ -1710,7 +1712,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { dst_mcv = lhs; src_mcv = rhs; } - } else if (self.reuseOperand(inst, 1, rhs)) { + } else if (self.reuseOperand(inst, op_rhs, 1, rhs)) { // RHS dies; use it as the destination. // Both operands cannot be memory. src_inst = op_lhs; @@ -1747,16 +1749,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // Now for step 2, we perform the actual op + const inst_ty = self.air.typeOfIndex(inst); const air_tags = self.air.instructions.items(.tag); switch (air_tags[inst]) { // TODO: Generate wrapping and non-wrapping versions separately - .add, .addwrap => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 0, 0x00), - .bool_or, .bit_or => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 1, 0x08), - .bool_and, .bit_and => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 4, 0x20), - .sub, .subwrap => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 5, 0x28), - .xor, .not => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 6, 0x30), + .add, .addwrap => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 0, 0x00), + .bool_or, .bit_or => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 1, 0x08), + .bool_and, .bit_and => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 4, 0x20), + .sub, .subwrap => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 5, 0x28), + .xor, .not => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 6, 0x30), - .mul, .mulwrap => try self.genX8664Imul(inst.src, inst.ty, dst_mcv, src_mcv), + .mul, .mulwrap => try self.genX8664Imul(inst_ty, dst_mcv, src_mcv), else => unreachable, } @@ -1958,7 +1961,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, .register => |src_reg| { - try self.genX8664ModRMRegToStack(src, dst_ty, off, src_reg, mr + 0x1); + try self.genX8664ModRMRegToStack(dst_ty, off, src_reg, mr + 0x1); }, .immediate => |imm| { _ = imm; @@ -1984,7 +1987,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Performs integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv. 
fn genX8664Imul( self: *Self, - src: LazySrcLoc, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue, @@ -2067,7 +2069,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { encoder.imm32(@intCast(i32, imm)); } else { const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv); - return self.genX8664Imul(src, dst_ty, dst_mcv, MCValue{ .register = src_reg }); + return self.genX8664Imul(dst_ty, dst_mcv, MCValue{ .register = src_reg }); } }, .embedded_in_code, .memory, .stack_offset => { @@ -2163,7 +2165,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void { - const ty_str = self.air.instruction.items(.data)[inst].ty_str; + const ty_str = self.air.instructions.items(.data)[inst].ty_str; const zir = &self.mod_fn.owner_decl.namespace.file_scope.zir; const name = zir.nullTerminatedString(ty_str.str); const name_with_null = name.ptr[0 .. name.len + 1]; @@ -2224,11 +2226,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArg(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airArg(self: *Self, inst: Air.Inst.Index) !void { const arg_index = self.arg_index; self.arg_index += 1; - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const result = self.args[arg_index]; const mcv = switch (arch) { @@ -2252,7 +2254,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.genArgDbgInfo(inst, mcv); if (self.liveness.isUnused(inst)) - return MCValue.dead; + return self.finishAirBookkeeping(); switch (mcv) { .register => |reg| { @@ -2261,10 +2263,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else => {}, } - return mcv; + return self.finishAir(inst, mcv, .{ .none, .none, .none }); } - fn genBreakpoint(self: *Self) !MCValue { + fn airBreakpoint(self: *Self) !void { switch (arch) { .i386, .x86_64 => { try self.code.append(0xcc); // int3 @@ -2280,15 +2282,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail("TODO implement @breakpoint() for {}", .{self.target.cpu.arch}), } - return .none; + return self.finishAirBookkeeping(); } - fn genCall(self: *Self, inst: Air.Inst.Index) !MCValue { - const pl_op = self.air.instruction.items(.data)[inst].pl_op; - const fn_ty = self.air.getType(pl_op.operand); + fn airCall(self: *Self, inst: Air.Inst.Index) !void { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const fn_ty = self.air.typeOf(pl_op.operand); const callee = pl_op.operand; - const extra = self.air.extraData(Air.Call, inst_data.payload); - const args = self.air.extra[extra.end..][0..extra.data.args_len]; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); var info = try self.resolveCallingConventionValues(fn_ty); defer info.deinit(self); @@ -2300,6 +2302,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64 => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. 
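Before the remaining call-lowering hunks, it is worth spelling out the pattern behind the mechanical `arg.ty` to `self.air.typeOf(arg)` rewrites: in the new AIR layout an operand is a bare Air.Inst.Ref that carries no inline type or source location, so a type must be recovered from the Air data itself, and hash-table keys must first be converted from a Ref to an Inst.Index. Neither helper's body appears in these hunks; the sketches below are assumptions, inferred from the arithmetic that resolveInst performs further down in this same diff:

    // Sketch only; not part of the patch. Refs below typed_value_map.len
    // denote interned constants; everything above maps 1:1 onto Inst.Index.
    fn refToIndex(ref: Air.Inst.Ref) ?Air.Inst.Index {
        const ref_int = @enumToInt(ref);
        if (ref_int < Air.Inst.Ref.typed_value_map.len) return null;
        return @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
    }

    // Same assumption: a Ref's type comes either from the interned table
    // or from the instruction that produced the value.
    fn typeOf(air: Air, ref: Air.Inst.Ref) Type {
        const ref_int = @enumToInt(ref);
        if (ref_int < Air.Inst.Ref.typed_value_map.len) {
            return Air.Inst.Ref.typed_value_map[ref_int].ty;
        }
        return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len));
    }

This is also why the Air.refToIndex(op_rhs).? call sites above can assert non-null: a value being recorded in inst_table is by construction a runtime instruction, never an interned constant.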
@@ -2307,12 +2310,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .none => continue, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => |off| { // Here we need to emit instructions like this: // mov qword ptr [rsp + stack_offset], x - try self.genSetStack(arg.ty, off, arg_mcv); + try self.genSetStack(arg_ty, off, arg_mcv); }, .ptr_stack_offset => { return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); @@ -2389,6 +2392,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .arm, .armeb => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -2403,7 +2407,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { return self.fail("TODO implement calling with parameters in memory", .{}); @@ -2452,6 +2456,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -2466,7 +2471,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { return self.fail("TODO implement calling with parameters in memory", .{}); @@ -2510,6 +2515,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. @@ -2521,7 +2527,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64, .aarch64 => try self.register_manager.getReg(reg, null), else => unreachable, } - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { // Here we need to emit instructions like this: @@ -2612,6 +2618,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64 => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. 
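The second structural change running through this file: handlers renamed from gen* to air* return void instead of an MCValue. Each one funnels its result plus up to Liveness.bpi - 1 (three) operand refs through finishAir, with finishAirBookkeeping as the variant for instructions that produce no tracked value. That is also why airCall below tolerates only two arguments at this point in the series, since the callee occupies the first of the three operand slots (a follow-up commit in this series extends Liveness and the backends to as many as 35 operands). The body of finishAir is not quoted in this patch, so the following sketch is an assumption inferred from its call sites and from the 4-bit tomb layout; getTombBits is an assumed accessor:

    // Sketch only; not part of the patch.
    fn finishAir(
        self: *Self,
        inst: Air.Inst.Index,
        result: MCValue,
        operands: [Liveness.bpi - 1]Air.Inst.Ref,
    ) void {
        // One tomb bit per operand slot, low bit first; the nibble's top
        // bit records whether the instruction itself is unreferenced.
        var tomb_bits = self.liveness.getTombBits(inst);
        for (operands) |op| {
            const dies = @truncate(u1, tomb_bits) != 0;
            tomb_bits >>= 1;
            if (!dies) continue;
            // Interned constants have no death to process.
            const op_index = Air.refToIndex(op) orelse continue;
            self.processDeath(op_index);
        }
        const is_used = @truncate(u1, tomb_bits) == 0;
        if (is_used) {
            const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
            branch.inst_table.putAssumeCapacityNoClobber(inst, result);
        }
    }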
@@ -2619,7 +2626,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .none => continue, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { // Here we need to emit instructions like this: @@ -2661,6 +2668,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { for (info.args) |mc_arg, arg_i| { const arg = inst.args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(inst.args[arg_i]); switch (mc_arg) { @@ -2675,7 +2683,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { return self.fail("TODO implement calling with parameters in memory", .{}); @@ -2696,7 +2704,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const got_index = func_payload.data.owner_decl.link.plan9.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; - try self.genSetReg(inst.base.src, Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); + try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32()); } else if (func_value.castTag(.extern_fn)) |_| { @@ -2712,51 +2720,61 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } else unreachable; - switch (info.return_value) { - .register => |reg| { - if (Register.allocIndex(reg) == null) { - // Save function return value in a callee saved register - return try self.copyToNewRegister(inst, info.return_value); - } - }, - else => {}, - } + const result: MCValue = result: { + switch (info.return_value) { + .register => |reg| { + if (Register.allocIndex(reg) == null) { + // Save function return value in a callee saved register + break :result try self.copyToNewRegister(inst, info.return_value); + } + }, + else => {}, + } + break :result info.return_value; + }; - return info.return_value; + if (args.len <= Liveness.bpi - 2) { + var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); + buf[0] = callee; + std.mem.copy(Air.Inst.Ref, buf[1..], args); + return self.finishAir(inst, result, buf); + } + @panic("TODO: codegen for function call with greater than 2 args"); } - fn genRef(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airRef(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.getType(ty_op.operand); - const operand = try self.resolveInst(ty_op.operand); - switch (operand) { - .unreach => unreachable, - .dead => unreachable, - .none => return .none, - - .immediate, - .register, - .ptr_stack_offset, - .ptr_embedded_in_code, - .compare_flags_unsigned, - .compare_flags_signed, - => { - const stack_offset = try self.allocMemPtr(inst); - try self.genSetStack(operand_ty, stack_offset, operand); - return MCValue{ .ptr_stack_offset = stack_offset }; - }, + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ty = self.air.typeOf(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); + switch (operand) { + .unreach => unreachable, + .dead => unreachable, + .none => break :result MCValue{ .none = {} }, + + .immediate, + .register, + .ptr_stack_offset, + 
.ptr_embedded_in_code, + .compare_flags_unsigned, + .compare_flags_signed, + => { + const stack_offset = try self.allocMemPtr(inst); + try self.genSetStack(operand_ty, stack_offset, operand); + break :result MCValue{ .ptr_stack_offset = stack_offset }; + }, - .stack_offset => |offset| return MCValue{ .ptr_stack_offset = offset }, - .embedded_in_code => |offset| return MCValue{ .ptr_embedded_in_code = offset }, - .memory => |vaddr| return MCValue{ .immediate = vaddr }, + .stack_offset => |offset| break :result MCValue{ .ptr_stack_offset = offset }, + .embedded_in_code => |offset| break :result MCValue{ .ptr_embedded_in_code = offset }, + .memory => |vaddr| break :result MCValue{ .immediate = vaddr }, - .undef => return self.fail("TODO implement ref on an undefined value", .{}), - } + .undef => return self.fail("TODO implement ref on an undefined value", .{}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn ret(self: *Self, mcv: MCValue) !MCValue { + fn ret(self: *Self, mcv: MCValue) !void { const ret_ty = self.fn_type.fnReturnType(); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); switch (arch) { @@ -2786,28 +2804,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail("TODO implement return for {}", .{self.target.cpu.arch}), } - return .unreach; } - fn genRet(self: *Self, inst: Air.Inst.Index) !MCValue { - const operand = try self.resolveInst(self.air.instructions.items(.data)[inst].un_op); - return self.ret(inst.base.src, operand); + fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); + try self.ret(operand); + return self.finishAirBookkeeping(); } - fn genCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.getType(bin_op.lhs); - assert(ty.eql(self.air.getType(bin_op.rhs))); + if (self.liveness.isUnused(inst)) + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + const ty = self.air.typeOf(bin_op.lhs); + assert(ty.eql(self.air.typeOf(bin_op.rhs))); if (ty.zigTypeTag() == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - switch (arch) { - .x86_64 => { + const result: MCValue = switch (arch) { + .x86_64 => result: { try self.code.ensureCapacity(self.code.items.len + 8); // There are 2 operands, destination and source. 
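One convention in airCmp worth pausing on: the instruction's result is not a materialized 0/1 but the CPU flag state itself, recorded as a compare_flags_signed or compare_flags_unsigned MCValue. A sketch of why that pays off; the mnemonics are illustrative, not quoted from this patch:

    // The compare only sets flags and records which reading applies:
    //   cmp rdi, rsi
    //   => MCValue{ .compare_flags_unsigned = .lt }
    // A later conditional branch can then branch on the flags directly
    // (jb on x86_64, blo on ARM) instead of first setting a register to
    // 0 or 1 and re-testing it.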
@@ -2822,12 +2840,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.genX8664BinMathCode(Type.initTag(.bool), dst_mcv, src_mcv, 7, 0x38); const info = ty.intInfo(self.target.*); - return switch (info.signedness) { + break :result switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, - .arm, .armeb => { + .arm, .armeb => result: { const lhs_is_register = lhs == .register; const rhs_is_register = rhs == .register; // lhs should always be a register @@ -2854,39 +2872,40 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; if (lhs_mcv == .register and !lhs_is_register) { try self.genSetReg(ty, lhs_mcv.register, lhs); - branch.inst_table.putAssumeCapacity(bin_op.lhs, lhs); + branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.lhs).?, lhs); } if (rhs_mcv == .register and !rhs_is_register) { try self.genSetReg(ty, rhs_mcv.register, rhs); - branch.inst_table.putAssumeCapacity(bin_op.rhs, rhs); + branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.rhs).?, rhs); } // The destination register is not present in the cmp instruction try self.genArmBinOpCode(undefined, lhs_mcv, rhs_mcv, false, .cmp_eq); const info = ty.intInfo(self.target.*); - return switch (info.signedness) { + break :result switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, else => return self.fail("TODO implement cmp for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genDbgStmt(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; try self.dbgAdvancePCAndLine(dbg_stmt.line, dbg_stmt.column); - assert(self.liveness.isUnused(inst)); - return MCValue.dead; + return self.finishAirBookkeeping(); } - fn genCondBr(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); - const extra = self.air.extraData(Air.CondBr, inst_data.payload); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + const liveness_condbr = self.liveness.getCondBr(inst); const reloc: Reloc = switch (arch) { .i386, .x86_64 => reloc: { @@ -2985,9 +3004,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.branch_stack.append(.{}); - const then_deaths = self.liveness.thenDeaths(inst); - try self.ensureProcessDeathCapacity(then_deaths.len); - for (then_deaths) |operand| { + try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len); + for (liveness_condbr.then_deaths) |operand| { self.processDeath(operand); } try self.genBody(then_body); @@ -3010,9 +3028,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const else_branch = self.branch_stack.addOneAssumeCapacity(); else_branch.* = .{}; - const else_deaths = self.liveness.elseDeaths(inst); - try self.ensureProcessDeathCapacity(else_deaths.len); - for (else_deaths) |operand| { + try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len); + for (liveness_condbr.else_deaths) |operand| { 
self.processDeath(operand); } try self.genBody(else_body); @@ -3026,8 +3043,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // assert that parent_branch.free_registers equals the saved_then_branch.free_registers // rather than assigning it. const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2]; - try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() + - else_branch.inst_table.count()); + try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, else_branch.inst_table.count()); const else_slice = else_branch.inst_table.entries.slice(); const else_keys = else_slice.items(.key); @@ -3058,11 +3074,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating else_entry {*} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(else_key.ty, canon_mcv, else_value); + try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } - try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() + - saved_then_branch.inst_table.count()); + try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); const then_slice = saved_then_branch.inst_table.entries.slice(); const then_keys = then_slice.items(.key); const then_values = then_slice.items(.value); @@ -3086,13 +3101,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating then_entry {*} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(then_key.ty, parent_mcv, then_value); + try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } self.branch_stack.pop().deinit(self.gpa); - return MCValue.unreach; + return self.finishAir(inst, .unreach, .{ pl_op.operand, .none, .none }); } fn isNull(self: *Self, operand: MCValue) !MCValue { @@ -3131,107 +3146,115 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genIsNull(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isNull(operand); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isNull(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isNull(operand); }; - try self.load(operand, ptr); - return self.isNull(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonNull(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isNonNull(operand); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isNonNull(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isNonNull(operand); }; - try self.load(operand, ptr); - return self.isNonNull(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsErr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isErr(operand); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isErr(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isErr(operand); }; - try self.load(operand, ptr); - return self.isErr(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonErr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isNonErr(operand); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isNonErr(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isNonErr(operand); }; - try self.load(operand, ptr); - return self.isNonErr(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genLoop(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airLoop(self: *Self, inst: Air.Inst.Index) !void { // A loop is a setup to be able to jump back to the beginning. const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); @@ -3239,7 +3262,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const start_index = self.code.items.len; try self.genBody(body); try self.jump(start_index); - return MCValue.unreach; + return self.finishAirBookkeeping(); } /// Send control flow to the `index` of `self.code`. @@ -3274,7 +3297,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genBlock(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airBlock(self: *Self, inst: Air.Inst.Index) !void { try self.blocks.putNoClobber(self.gpa, inst, .{ // A block is a setup to be able to jump to the end. 
.relocs = .{}, @@ -3288,21 +3311,24 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const block_data = self.blocks.getPtr(inst).?; defer block_data.relocs.deinit(self.gpa); - const ty_pl = self.air.instructions.items(.data).ty_pl; + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; try self.genBody(body); for (block_data.relocs.items) |reloc| try self.performReloc(reloc); - return @bitCast(MCValue, block_data.mcv); + const result = @bitCast(MCValue, block_data.mcv); + return self.finishAir(inst, result, .{ .none, .none, .none }); } - fn genSwitch(self: *Self, inst: Air.Inst.Index) !MCValue { - _ = inst; + fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const condition = pl_op.operand; switch (arch) { - else => return self.fail("TODO genSwitch for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO airSwitch for {}", .{self.target.cpu.arch}), } + return self.finishAir(inst, .dead, .{ condition, .none, .none }); } fn performReloc(self: *Self, reloc: Reloc) !void { @@ -3335,54 +3361,49 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genBrBlockFlat(self: *Self, inst: Air.Inst.Index) !MCValue { - try self.genBody(inst.body); - const last = inst.body.instructions[inst.body.instructions.len - 1]; - return self.br(inst.block, last); - } - - fn genBr(self: *Self, inst: Air.Inst.Index) !MCValue { - return self.br(inst.block, inst.operand); + fn airBr(self: *Self, inst: Air.Inst.Index) !void { + const branch = self.air.instructions.items(.data)[inst].br; + try self.br(branch.block_inst, branch.operand); + return self.finishAirBookkeeping(); } - fn genBoolOp(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const air_tags = self.air.instructions.items(.tag); - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { .x86_64 => switch (air_tags[inst]) { // lhs AND rhs - .bool_and => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .bool_and => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), // lhs OR rhs - .bool_or => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .bool_or => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), else => unreachable, // Not a boolean operation }, .arm, .armeb => switch (air_tags[inst]) { - .bool_and => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and), - .bool_or => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or), + .bool_and => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and), + .bool_or => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or), else => unreachable, // Not a boolean operation }, else => return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Index) !MCValue { + fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (operand.ty.hasCodeGenBits()) { + if (self.air.typeOf(operand).hasCodeGenBits()) { const operand_mcv = try 
self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { block_data.mcv = operand_mcv; } else { - try self.setRegOrMem(block.base.ty, block_mcv, operand_mcv); + try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); } - fn brVoid(self: *Self, block: Air.Inst.Index) !MCValue { + fn brVoid(self: *Self, block: Air.Inst.Index) !void { const block_data = self.blocks.getPtr(block).?; // Emit a jump with a relocation. It will be patched up after the block ends. @@ -3408,131 +3429,170 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail("TODO implement brvoid for {}", .{self.target.cpu.arch}), } - return .none; } - fn genAsm(self: *Self, inst: Air.Inst.Index) !MCValue { - if (!inst.is_volatile and self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { - .arm, .armeb => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + fn airAsm(self: *Self, inst: Air.Inst.Index) !void { + const air_datas = self.air.instructions.items(.data); + const air_extra = self.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload); + const zir = self.mod_fn.owner_decl.namespace.file_scope.zir; + const extended = zir.instructions.items(.data)[air_extra.data.zir_index].extended; + const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand); + const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source); + const outputs_len = @truncate(u5, extended.small); + const args_len = @truncate(u5, extended.small >> 5); + const clobbers_len = @truncate(u5, extended.small >> 10); + _ = clobbers_len; // TODO honor these + const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end..][0..outputs_len]); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end + outputs.len ..][0..args_len]); + + if (outputs_len > 1) { + return self.fail("TODO implement codegen for asm with more than 1 output", .{}); + } + var extra_i: usize = zir_extra.end; + const output_constraint: ?[]const u8 = out: { + var i: usize = 0; + while (i < outputs_len) : (i += 1) { + const output = zir.extraData(Zir.Inst.Asm.Output, extra_i); + extra_i = output.end; + break :out zir.nullTerminatedString(output.data.constraint); + } + break :out null; + }; + + const dead = !is_volatile and self.liveness.isUnused(inst); + const result: MCValue = if (dead) .dead else switch (arch) { + .arm, .armeb => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. 
constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } - if (mem.eql(u8, inst.asm_source, "svc #0")) { + if (mem.eql(u8, asm_source, "svc #0")) { writeInt(u32, try self.code.addManyAsArray(4), Instruction.svc(.al, 0).toU32()); } else { return self.fail("TODO implement support for more arm assembly instructions", .{}); } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue.none; } }, - .aarch64 => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + .aarch64 => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } - if (mem.eql(u8, inst.asm_source, "svc #0")) { + if (mem.eql(u8, asm_source, "svc #0")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x0).toU32()); - } else if (mem.eql(u8, inst.asm_source, "svc #0x80")) { + } else if (mem.eql(u8, asm_source, "svc #0x80")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x80).toU32()); } else { return self.fail("TODO implement support for more aarch64 assembly instructions", .{}); } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue.none; } }, - .riscv64 => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + .riscv64 => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } - if (mem.eql(u8, inst.asm_source, "ecall")) { + if (mem.eql(u8, asm_source, "ecall")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ecall.toU32()); } else { return self.fail("TODO implement support for more riscv64 assembly instructions", .{}); } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue.none; } }, - .x86_64, .i386 => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + .x86_64, .i386 => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. 
constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } { - var iter = std.mem.tokenize(inst.asm_source, "\n\r"); + var iter = std.mem.tokenize(asm_source, "\n\r"); while (iter.next()) |ins| { if (mem.eql(u8, ins, "syscall")) { try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 }); @@ -3571,20 +3631,27 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue{ .none = {} }; } }, else => return self.fail("TODO implement inline asm support for more architectures", .{}), + }; + if (outputs.len + args.len <= Liveness.bpi - 1) { + var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); + std.mem.copy(Air.Inst.Ref, &buf, outputs); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); + return self.finishAir(inst, result, buf); } + @panic("TODO: codegen for asm with greater than 3 args"); } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. @@ -3761,7 +3828,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, .register => |reg| { - try self.genX8664ModRMRegToStack(src, ty, stack_offset, reg, 0x89); + try self.genX8664ModRMRegToStack(ty, stack_offset, reg, 0x89); }, .memory => |vaddr| { _ = vaddr; @@ -4409,32 +4476,48 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genPtrToInt(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - return self.resolveInst(un_op); + const result = try self.resolveInst(un_op); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genBitCast(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - return self.resolveInst(ty_op.operand); + const result = try self.resolveInst(ty_op.operand); + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn resolveInst(self: *Self, inst: Air.Inst.Index) !MCValue { - // If the type has no codegen bits, no need to store it. - if (!inst.ty.hasCodeGenBits()) - return MCValue.none; - - // Constants have static lifetimes, so they are always memoized in the outer most table. 
- if (inst.castTag(.constant)) |const_inst| { - const branch = &self.branch_stack.items[0]; - const gop = try branch.inst_table.getOrPut(self.gpa, inst); - if (!gop.found_existing) { - gop.value_ptr.* = try self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val }); - } - return gop.value_ptr.*; + fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { + // First section of indexes correspond to a set number of constant values. + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return self.genTypedValue(Air.Inst.Ref.typed_value_map[ref_int]); } - return self.getResolvedInstValue(inst); + // If the type has no codegen bits, no need to store it. + const inst_ty = self.air.typeOf(inst); + if (!inst_ty.hasCodeGenBits()) + return MCValue{ .none = {} }; + + const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + switch (self.air.instructions.items(.tag)[inst_index]) { + .constant => { + // Constants have static lifetimes, so they are always memoized in the outer most table. + const branch = &self.branch_stack.items[0]; + const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); + if (!gop.found_existing) { + const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + gop.value_ptr.* = try self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.values[ty_pl.payload], + }); + } + return gop.value_ptr.*; + }, + .const_ty => unreachable, + else => return self.getResolvedInstValue(inst_index), + } } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { @@ -4454,8 +4537,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// A potential opportunity for future optimization here would be keeping track /// of the fact that the instruction is available both as an immediate /// and as a register. - fn limitImmediateType(self: *Self, inst: Air.Inst.Index, comptime T: type) !MCValue { - const mcv = try self.resolveInst(inst); + fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCValue { + const mcv = try self.resolveInst(operand); const ti = @typeInfo(T).Int; switch (mcv) { .immediate => |imm| { @@ -4470,7 +4553,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return mcv; } - fn genTypedValue(self: *Self, src: LazySrcLoc, typed_value: TypedValue) InnerError!MCValue { + fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); @@ -4480,7 +4563,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .Slice => { var buf: Type.Payload.ElemType = undefined; const ptr_type = typed_value.ty.slicePtrFieldType(&buf); - const ptr_mcv = try self.genTypedValue(src, .{ .ty = ptr_type, .val = typed_value.val }); + const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val }); const slice_len = typed_value.val.sliceLen(); // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean // the Sema code needs to use anonymous Decls or alloca instructions to store data. 
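With the refToIndex sketch earlier and resolveInst side by side, the Ref encoding can be read off concretely. The true length of typed_value_map is not visible in this patch, so N below is a placeholder:

    // Hypothetical walk-through, with N = Air.Inst.Ref.typed_value_map.len:
    //   @enumToInt(ref) == 3      (3 < N)  => genTypedValue(typed_value_map[3]);
    //                                         no table lookup, no liveness entry.
    //   @enumToInt(ref) == N + 17 (>= N)   => runtime instruction index 17:
    //     .constant  => memoized once in branch_stack.items[0], because
    //                   constants outlive every conditional branch;
    //     otherwise  => getResolvedInstValue(17).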
@@ -4541,7 +4624,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return MCValue{ .immediate = 0 }; var buf: Type.Payload.ElemType = undefined; - return self.genTypedValue(src, .{ + return self.genTypedValue(.{ .ty = typed_value.ty.optionalChild(&buf), .val = typed_value.val, }); -- cgit v1.2.3 From d17f492017c77d5d52d2fbd65eaa5c1e08b24161 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jul 2021 23:06:59 -0700 Subject: stage2: miscellaneous fixes for the branch * Breaking language change: inline assembly must use string literal syntax. This is in preparation for inline assembly improvements that involve more integration with the Zig language. This means we cannot rely on text substitution. * Liveness: properly handle inline assembly and function calls with more than 3 operands. - More than 35 operands is not yet supported. This is a low priority to implement. - This required implementation in codegen.zig as well. * Liveness: fix bug causing incorrect tomb bits. * Sema: enable switch expressions that are evaluated at compile-time. - Runtime switch instructions still need to be reworked in this branch. There was a TODO left here (by me) with a suggestion to do some bigger changes as part of the AIR memory reworking. Now that time has come and I plan to honor the suggestion in a future commit before merging this branch. * AIR printing: fix missing ')' on alive instructions. We're back to "hello world" working for the x86_64 backend. --- lib/std/Thread.zig | 64 ++++++--- lib/std/atomic.zig | 38 +++--- lib/std/atomic/Atomic.zig | 88 +++++++++--- src/AstGen.zig | 2 +- src/Liveness.zig | 72 +++++++++- src/Sema.zig | 333 +++++++++++++++++++++++----------------------- src/codegen.zig | 74 ++++++++++- src/print_air.zig | 2 +- 8 files changed, 441 insertions(+), 232 deletions(-) (limited to 'src/Liveness.zig') diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 91f7ff58c3..58a409c64e 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -505,8 +505,8 @@ const LinuxThreadImpl = struct { /// Ported over from musl libc's pthread detached implementation: /// https://github.com/ifduyue/musl/search?q=__unmapself fn freeAndExit(self: *ThreadCompletion) noreturn { - const unmap_and_exit: []const u8 = switch (target.cpu.arch) { - .i386 => ( + switch (target.cpu.arch) { + .i386 => asm volatile ( \\ movl $91, %%eax \\ movl %[ptr], %%ebx \\ movl %[len], %%ecx @@ -514,8 +514,12 @@ const LinuxThreadImpl = struct { \\ movl $1, %%eax \\ movl $0, %%ebx \\ int $128 + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .x86_64 => ( + .x86_64 => asm volatile ( \\ movq $11, %%rax \\ movq %[ptr], %%rbx \\ movq %[len], %%rcx @@ -523,8 +527,12 @@ const LinuxThreadImpl = struct { \\ movq $60, %%rax \\ movq $1, %%rdi \\ syscall + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .arm, .armeb, .thumb, .thumbeb => ( + .arm, .armeb, .thumb, .thumbeb => asm volatile ( \\ mov r7, #91 \\ mov r0, %[ptr] \\ mov r1, %[len] @@ -532,8 +540,12 @@ const LinuxThreadImpl = struct { \\ mov r7, #1 \\ mov r0, #0 \\ svc 0 + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .aarch64, .aarch64_be, .aarch64_32 => ( + .aarch64, .aarch64_be, .aarch64_32 => asm volatile ( \\ mov x8, #215 \\ mov x0, %[ptr] \\ mov x1, %[len] @@ -541,8 +553,12 @@ const LinuxThreadImpl = struct { \\ mov x8, #93 \\ mov x0, #0 \\ svc 0 + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" 
(self.mapped.len) + : "memory" ), - .mips, .mipsel => ( + .mips, .mipsel => asm volatile ( \\ move $sp, $25 \\ li $2, 4091 \\ move $4, %[ptr] @@ -551,8 +567,12 @@ const LinuxThreadImpl = struct { \\ li $2, 4001 \\ li $4, 0 \\ syscall + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .mips64, .mips64el => ( + .mips64, .mips64el => asm volatile ( \\ li $2, 4091 \\ move $4, %[ptr] \\ move $5, %[len] @@ -560,8 +580,12 @@ const LinuxThreadImpl = struct { \\ li $2, 4001 \\ li $4, 0 \\ syscall + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .powerpc, .powerpcle, .powerpc64, .powerpc64le => ( + .powerpc, .powerpcle, .powerpc64, .powerpc64le => asm volatile ( \\ li 0, 91 \\ mr %[ptr], 3 \\ mr %[len], 4 @@ -570,8 +594,12 @@ const LinuxThreadImpl = struct { \\ li 3, 0 \\ sc \\ blr + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .riscv64 => ( + .riscv64 => asm volatile ( \\ li a7, 215 \\ mv a0, %[ptr] \\ mv a1, %[len] @@ -579,19 +607,13 @@ const LinuxThreadImpl = struct { \\ li a7, 93 \\ mv a0, zero \\ ecall + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - else => |cpu_arch| { - @compileLog("Unsupported linux arch ", cpu_arch); - }, - }; - - asm volatile (unmap_and_exit - : - : [ptr] "r" (@ptrToInt(self.mapped.ptr)), - [len] "r" (self.mapped.len) - : "memory" - ); - + else => |cpu_arch| @compileError("Unsupported linux arch: " ++ @tagName(cpu_arch)), + } unreachable; } }; diff --git a/lib/std/atomic.zig b/lib/std/atomic.zig index 1944e5346b..42d57eb8fa 100644 --- a/lib/std/atomic.zig +++ b/lib/std/atomic.zig @@ -46,34 +46,38 @@ test "fence/compilerFence" { /// Signals to the processor that the caller is inside a busy-wait spin-loop. pub inline fn spinLoopHint() void { - const hint_instruction = switch (target.cpu.arch) { - // No-op instruction that can hint to save (or share with a hardware-thread) pipelining/power resources + switch (target.cpu.arch) { + // No-op instruction that can hint to save (or share with a hardware-thread) + // pipelining/power resources // https://software.intel.com/content/www/us/en/develop/articles/benefitting-power-and-performance-sleep-loops.html - .i386, .x86_64 => "pause", + .i386, .x86_64 => asm volatile ("pause" ::: "memory"), // No-op instruction that serves as a hardware-thread resource yield hint. // https://stackoverflow.com/a/7588941 - .powerpc64, .powerpc64le => "or 27, 27, 27", + .powerpc64, .powerpc64le => asm volatile ("or 27, 27, 27" ::: "memory"), - // `isb` appears more reliable for releasing execution resources than `yield` on common aarch64 CPUs. + // `isb` appears more reliable for releasing execution resources than `yield` + // on common aarch64 CPUs. // https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8258604 // https://bugs.mysql.com/bug.php?id=100664 - .aarch64, .aarch64_be, .aarch64_32 => "isb", + .aarch64, .aarch64_be, .aarch64_32 => asm volatile ("isb" ::: "memory"), // `yield` was introduced in v6k but is also available on v6m. 
// https://www.keil.com/support/man/docs/armasm/armasm_dom1361289926796.htm - .arm, .armeb, .thumb, .thumbeb => blk: { - const can_yield = comptime std.Target.arm.featureSetHasAny(target.cpu.features, .{ .has_v6k, .has_v6m }); - const instruction = if (can_yield) "yield" else ""; - break :blk instruction; + .arm, .armeb, .thumb, .thumbeb => { + const can_yield = comptime std.Target.arm.featureSetHasAny(target.cpu.features, .{ + .has_v6k, .has_v6m, + }); + if (can_yield) { + asm volatile ("yield" ::: "memory"); + } else { + asm volatile ("" ::: "memory"); + } }, - - else => "", - }; - - // Memory barrier to prevent the compiler from optimizing away the spin-loop - // even if no hint_instruction was provided. - asm volatile (hint_instruction ::: "memory"); + // Memory barrier to prevent the compiler from optimizing away the spin-loop + // even if no hint_instruction was provided. + else => asm volatile ("" ::: "memory"), + } } test "spinLoopHint" { diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig index 80fb1ae297..f4e3ebda9d 100644 --- a/lib/std/atomic/Atomic.zig +++ b/lib/std/atomic/Atomic.zig @@ -178,26 +178,78 @@ pub fn Atomic(comptime T: type) type { ) u1 { // x86 supports dedicated bitwise instructions if (comptime target.cpu.arch.isX86() and @sizeOf(T) >= 2 and @sizeOf(T) <= 8) { - const instruction = switch (op) { - .Set => "lock bts", - .Reset => "lock btr", - .Toggle => "lock btc", - }; - - const suffix = switch (@sizeOf(T)) { - 2 => "w", - 4 => "l", - 8 => "q", + const old_bit: u8 = switch (@sizeOf(T)) { + 2 => switch (op) { + .Set => asm volatile ("lock btsw %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Reset => asm volatile ("lock btrw %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Toggle => asm volatile ("lock btcw %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + }, + 4 => switch (op) { + .Set => asm volatile ("lock btsl %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Reset => asm volatile ("lock btrl %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Toggle => asm volatile ("lock btcl %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + }, + 8 => switch (op) { + .Set => asm volatile ("lock btsq %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Reset => asm volatile ("lock btrq %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Toggle => asm volatile ("lock btcq %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" 
(&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + }, else => @compileError("Invalid atomic type " ++ @typeName(T)), }; - - const old_bit = asm volatile (instruction ++ suffix ++ " %[bit], %[ptr]" - : [result] "={@ccc}" (-> u8) // LLVM doesn't support u1 flag register return values - : [ptr] "*p" (&self.value), - [bit] "X" (@as(T, bit)) - : "cc", "memory" - ); - return @intCast(u1, old_bit); } diff --git a/src/AstGen.zig b/src/AstGen.zig index cbd918ecc7..31e7f040a2 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -6601,7 +6601,7 @@ fn asmExpr( const asm_source = switch (node_tags[full.ast.template]) { .string_literal => try astgen.strLitAsString(main_tokens[full.ast.template]), .multiline_string_literal => try astgen.strLitNodeAsString(full.ast.template), - else => return astgen.failNode(node, "assembly code must use string literal syntax", .{}), + else => return astgen.failNode(full.ast.template, "assembly code must use string literal syntax", .{}), }; // See https://github.com/ziglang/zig/issues/215 and related issues discussing diff --git a/src/Liveness.zig b/src/Liveness.zig index 79fc0d7325..2c226122bf 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -24,6 +24,11 @@ const Log2Int = std.math.Log2Int; tomb_bits: []usize, /// Sparse table of specially handled instructions. The value is an index into the `extra` /// array. The meaning of the data depends on the AIR tag. +/// * `cond_br` - points to a `CondBr` in `extra` at this index. +/// * `switch_br` - points to a `SwitchBr` in `extra` at this index. +/// * `asm`, `call` - the value is a set of bits which are the extra tomb bits of operands. +/// The main tomb bits are still used and the extra ones are starting with the lsb of the +/// value here. special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), /// Auxilliary data. The way this data is interpreted is determined contextually. extra: []const u32, @@ -67,6 +72,8 @@ pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness { defer a.extra.deinit(gpa); defer a.table.deinit(gpa); + std.mem.set(usize, a.tomb_bits, 0); + const main_body = air.getMainBody(); try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len)); try analyzeWithContext(&a, null, main_body); @@ -103,7 +110,7 @@ pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); - l.tomb_bits[usize_index] |= mask; + l.tomb_bits[usize_index] &= ~mask; } /// Higher level API. 
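Two details in the surrounding Liveness hunks deserve a note. First, gpa.alloc returns uninitialized memory, so the added std.mem.set(usize, a.tomb_bits, 0) is the actual fix for the "incorrect tomb bits" bug called out in this commit's message. Second, the clearOperandDeath change is a polarity fix: OR-ing the mask in recorded a death where one should have been erased. The bit addressing is easiest to verify with concrete numbers, assuming a 64-bit usize:

    // 64 bits per word / 4 tomb bits per instruction = 16 instructions per word.
    // For inst = 21, operand = 2:
    //   usize_index = (21 * 4) / 64     == 1
    //   bit_offset  = (21 % 16) * 4 + 2 == 22
    //   mask        = @as(usize, 1) << 22
    // Record a death:          l.tomb_bits[1] |=  mask;
    // Erase a death (the fix): l.tomb_bits[1] &= ~mask;
    // Bit (21 % 16) * 4 + 3, the nibble's top bit, is the instruction's own
    // unreferenced flag. For call and asm, operand tombs past the first three
    // continue LSB-first in the 32-bit `special` value, as the ExtraTombs
    // helper in the hunks below implements.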
@@ -298,7 +305,17 @@ fn analyzeInst( std.mem.copy(Air.Inst.Ref, buf[1..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function call with greater than 2 args"); + var extra_tombs: ExtraTombs = .{ + .analysis = a, + .new_set = new_set, + .inst = inst, + .main_tomb = main_tomb, + }; + try extra_tombs.feed(callee); + for (args) |arg| { + try extra_tombs.feed(arg); + } + return extra_tombs.finish(); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; @@ -317,7 +334,19 @@ fn analyzeInst( std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for asm with greater than 3 args"); + var extra_tombs: ExtraTombs = .{ + .analysis = a, + .new_set = new_set, + .inst = inst, + .main_tomb = main_tomb, + }; + for (outputs) |output| { + try extra_tombs.feed(output); + } + for (args) |arg| { + try extra_tombs.feed(arg); + } + return extra_tombs.finish(); }, .block => { const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); @@ -531,3 +560,40 @@ fn trackOperands( } a.storeTombBits(inst, tomb_bits); } + +const ExtraTombs = struct { + analysis: *Analysis, + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), + inst: Air.Inst.Index, + main_tomb: bool, + bit_index: usize = 0, + tomb_bits: Bpi = 0, + big_tomb_bits: u32 = 0, + + fn feed(et: *ExtraTombs, op_ref: Air.Inst.Ref) !void { + const this_bit_index = et.bit_index; + assert(this_bit_index < 32); // TODO mechanism for when there are greater than 32 operands + et.bit_index += 1; + const gpa = et.analysis.gpa; + const op_int = @enumToInt(op_ref); + if (op_int < Air.Inst.Ref.typed_value_map.len) return; + const op_index: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); + const prev = try et.analysis.table.fetchPut(gpa, op_index, {}); + if (prev == null) { + // Death. 
+ if (et.new_set) |ns| try ns.putNoClobber(gpa, op_index, {}); + if (this_bit_index < bpi - 1) { + et.tomb_bits |= @as(Bpi, 1) << @intCast(OperandInt, this_bit_index); + } else { + const big_bit_index = this_bit_index - (bpi - 1); + et.big_tomb_bits |= @as(u32, 1) << @intCast(u5, big_bit_index); + } + } + } + + fn finish(et: *ExtraTombs) !void { + et.tomb_bits |= @as(Bpi, @boolToInt(et.main_tomb)) << (bpi - 1); + et.analysis.storeTombBits(et.inst, et.tomb_bits); + try et.analysis.special.put(et.analysis.gpa, et.inst, et.big_tomb_bits); + } +}; diff --git a/src/Sema.zig b/src/Sema.zig index 777619dc48..79f1ed0614 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -258,24 +258,24 @@ pub fn analyzeBody( .slice_sentinel => try sema.zirSliceSentinel(block, inst), .slice_start => try sema.zirSliceStart(block, inst), .str => try sema.zirStr(block, inst), - //.switch_block => try sema.zirSwitchBlock(block, inst, false, .none), - //.switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), - //.switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), - //.switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), - //.switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), - //.switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), - //.switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), - //.switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), - //.switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), - //.switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), - //.switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), - //.switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), - //.switch_capture => try sema.zirSwitchCapture(block, inst, false, false), - //.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), - //.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), - //.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), - //.switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), - //.switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), + .switch_block => try sema.zirSwitchBlock(block, inst, false, .none), + .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), + .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), + .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), + .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), + .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), + .switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), + .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), + .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), + .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), + .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), + .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), + .switch_capture => try sema.zirSwitchCapture(block, inst, false, false), + .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), + .switch_capture_multi => try 
sema.zirSwitchCapture(block, inst, true, false), + .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), + .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), + .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), .type_info => try sema.zirTypeInfo(block, inst), .size_of => try sema.zirSizeOf(block, inst), .bit_size_of => try sema.zirBitSizeOf(block, inst), @@ -534,7 +534,6 @@ pub fn analyzeBody( return break_inst; } }, - else => |t| @panic(@tagName(t)), }; if (sema.typeOf(air_inst).isNoReturn()) return always_noreturn; @@ -4110,8 +4109,8 @@ fn analyzeSwitch( const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; + const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. - const item = sema.resolveInst(item_ref) catch unreachable; const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; if (operand_val.eql(item_val)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); @@ -4132,9 +4131,9 @@ fn analyzeSwitch( const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..body_len]; for (items) |item_ref| { + const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. - const item = sema.resolveInst(item_ref) catch unreachable; - const item_val = sema.resolveConstValue(&child_block, item.src, item) catch unreachable; + const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; if (operand_val.eql(item_val)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); } @@ -4171,156 +4170,157 @@ fn analyzeSwitch( // TODO when reworking AIR memory layout make multi cases get generated as cases, // not as part of the "else" block. - const cases = try sema.arena.alloc(Inst.SwitchBr.Case, scalar_cases_len); + return mod.fail(&block.base, src, "TODO rework runtime switch Sema", .{}); + //const cases = try sema.arena.alloc(Inst.SwitchBr.Case, scalar_cases_len); - var case_block = child_block.makeSubBlock(); - case_block.runtime_loop = null; - case_block.runtime_cond = operand.src; - case_block.runtime_index += 1; - defer case_block.instructions.deinit(gpa); + //var case_block = child_block.makeSubBlock(); + //case_block.runtime_loop = null; + //case_block.runtime_cond = operand.src; + //case_block.runtime_index += 1; + //defer case_block.instructions.deinit(gpa); - var extra_index: usize = special.end; + //var extra_index: usize = special.end; - var scalar_i: usize = 0; - while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - extra_index += 1; - const body_len = sema.code.extra[extra_index]; - extra_index += 1; - const body = sema.code.extra[extra_index..][0..body_len]; - extra_index += body_len; - - case_block.instructions.shrinkRetainingCapacity(0); - // We validate these above; these two calls are guaranteed to succeed. 
- const item = sema.resolveInst(item_ref) catch unreachable; - const item_val = sema.resolveConstValue(&case_block, .unneeded, item) catch unreachable; - - _ = try sema.analyzeBody(&case_block, body); - - cases[scalar_i] = .{ - .item = item_val, - .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) }, - }; - } - - var first_else_body: Body = undefined; - var prev_condbr: ?*Inst.CondBr = null; - - var multi_i: usize = 0; - while (multi_i < multi_cases_len) : (multi_i += 1) { - const items_len = sema.code.extra[extra_index]; - extra_index += 1; - const ranges_len = sema.code.extra[extra_index]; - extra_index += 1; - const body_len = sema.code.extra[extra_index]; - extra_index += 1; - const items = sema.code.refSlice(extra_index, items_len); - extra_index += items_len; - - case_block.instructions.shrinkRetainingCapacity(0); - - var any_ok: ?Air.Inst.Index = null; - - for (items) |item_ref| { - const item = sema.resolveInst(item_ref); - _ = try sema.resolveConstValue(&child_block, item.src, item); - - const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); - if (any_ok) |some| { - any_ok = try case_block.addBinOp(.bool_or, some, cmp_ok); - } else { - any_ok = cmp_ok; - } - } - - var range_i: usize = 0; - while (range_i < ranges_len) : (range_i += 1) { - const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - extra_index += 1; - const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - extra_index += 1; - - const item_first = sema.resolveInst(first_ref); - const item_last = sema.resolveInst(last_ref); - - _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); - _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); + //var scalar_i: usize = 0; + //while (scalar_i < scalar_cases_len) : (scalar_i += 1) { + // const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + // extra_index += 1; + // const body_len = sema.code.extra[extra_index]; + // extra_index += 1; + // const body = sema.code.extra[extra_index..][0..body_len]; + // extra_index += body_len; - // operand >= first and operand <= last - const range_first_ok = try case_block.addBinOp( - .cmp_gte, - operand, - item_first, - ); - const range_last_ok = try case_block.addBinOp( - .cmp_lte, - operand, - item_last, - ); - const range_ok = try case_block.addBinOp( - .bool_and, - range_first_ok, - range_last_ok, - ); - if (any_ok) |some| { - any_ok = try case_block.addBinOp(.bool_or, some, range_ok); - } else { - any_ok = range_ok; - } - } + // case_block.instructions.shrinkRetainingCapacity(0); + // const item = sema.resolveInst(item_ref); + // // We validate these above; these two calls are guaranteed to succeed. 
+ // const item_val = sema.resolveConstValue(&case_block, .unneeded, item) catch unreachable; - const new_condbr = try sema.arena.create(Inst.CondBr); - new_condbr.* = .{ - .base = .{ - .tag = .condbr, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .condition = any_ok.?, - .then_body = undefined, - .else_body = undefined, - }; - try case_block.instructions.append(gpa, &new_condbr.base); + // _ = try sema.analyzeBody(&case_block, body); - const cond_body: Body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - }; + // cases[scalar_i] = .{ + // .item = item_val, + // .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) }, + // }; + //} - case_block.instructions.shrinkRetainingCapacity(0); - const body = sema.code.extra[extra_index..][0..body_len]; - extra_index += body_len; - _ = try sema.analyzeBody(&case_block, body); - new_condbr.then_body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - }; - if (prev_condbr) |condbr| { - condbr.else_body = cond_body; - } else { - first_else_body = cond_body; - } - prev_condbr = new_condbr; - } - - const final_else_body: Body = blk: { - if (special.body.len != 0) { - case_block.instructions.shrinkRetainingCapacity(0); - _ = try sema.analyzeBody(&case_block, special.body); - const else_body: Body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - }; - if (prev_condbr) |condbr| { - condbr.else_body = else_body; - break :blk first_else_body; - } else { - break :blk else_body; - } - } else { - break :blk .{ .instructions = &.{} }; - } - }; + //var first_else_body: Body = undefined; + //var prev_condbr: ?*Inst.CondBr = null; - _ = try child_block.addSwitchBr(src, operand, cases, final_else_body); - return sema.analyzeBlockBody(block, src, &child_block, merges); + //var multi_i: usize = 0; + //while (multi_i < multi_cases_len) : (multi_i += 1) { + // const items_len = sema.code.extra[extra_index]; + // extra_index += 1; + // const ranges_len = sema.code.extra[extra_index]; + // extra_index += 1; + // const body_len = sema.code.extra[extra_index]; + // extra_index += 1; + // const items = sema.code.refSlice(extra_index, items_len); + // extra_index += items_len; + + // case_block.instructions.shrinkRetainingCapacity(0); + + // var any_ok: ?Air.Inst.Index = null; + + // for (items) |item_ref| { + // const item = sema.resolveInst(item_ref); + // _ = try sema.resolveConstValue(&child_block, item.src, item); + + // const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); + // if (any_ok) |some| { + // any_ok = try case_block.addBinOp(.bool_or, some, cmp_ok); + // } else { + // any_ok = cmp_ok; + // } + // } + + // var range_i: usize = 0; + // while (range_i < ranges_len) : (range_i += 1) { + // const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + // extra_index += 1; + // const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + // extra_index += 1; + + // const item_first = sema.resolveInst(first_ref); + // const item_last = sema.resolveInst(last_ref); + + // _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); + // _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); + + // // operand >= first and operand <= last + // const range_first_ok = try case_block.addBinOp( + // .cmp_gte, + // operand, + // item_first, + // ); + // const range_last_ok = try case_block.addBinOp( + // .cmp_lte, + // operand, + // 
item_last, + // ); + // const range_ok = try case_block.addBinOp( + // .bool_and, + // range_first_ok, + // range_last_ok, + // ); + // if (any_ok) |some| { + // any_ok = try case_block.addBinOp(.bool_or, some, range_ok); + // } else { + // any_ok = range_ok; + // } + // } + + // const new_condbr = try sema.arena.create(Inst.CondBr); + // new_condbr.* = .{ + // .base = .{ + // .tag = .condbr, + // .ty = Type.initTag(.noreturn), + // .src = src, + // }, + // .condition = any_ok.?, + // .then_body = undefined, + // .else_body = undefined, + // }; + // try case_block.instructions.append(gpa, &new_condbr.base); + + // const cond_body: Body = .{ + // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), + // }; + + // case_block.instructions.shrinkRetainingCapacity(0); + // const body = sema.code.extra[extra_index..][0..body_len]; + // extra_index += body_len; + // _ = try sema.analyzeBody(&case_block, body); + // new_condbr.then_body = .{ + // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), + // }; + // if (prev_condbr) |condbr| { + // condbr.else_body = cond_body; + // } else { + // first_else_body = cond_body; + // } + // prev_condbr = new_condbr; + //} + + //const final_else_body: Body = blk: { + // if (special.body.len != 0) { + // case_block.instructions.shrinkRetainingCapacity(0); + // _ = try sema.analyzeBody(&case_block, special.body); + // const else_body: Body = .{ + // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), + // }; + // if (prev_condbr) |condbr| { + // condbr.else_body = else_body; + // break :blk first_else_body; + // } else { + // break :blk else_body; + // } + // } else { + // break :blk .{ .instructions = &.{} }; + // } + //}; + + //_ = try child_block.addSwitchBr(src, operand, cases, final_else_body); + //return sema.analyzeBlockBody(block, src, &child_block, merges); } fn resolveSwitchItemVal( @@ -4332,16 +4332,17 @@ fn resolveSwitchItemVal( range_expand: Module.SwitchProngSrc.RangeExpand, ) CompileError!TypedValue { const item = sema.resolveInst(item_ref); + const item_ty = sema.typeOf(item); // Constructing a LazySrcLoc is costly because we only have the switch AST node. // Only if we know for sure we need to report a compile error do we resolve the // full source locations. 
if (sema.resolveConstValue(block, .unneeded, item)) |val| { - return TypedValue{ .ty = item.ty, .val = val }; + return TypedValue{ .ty = item_ty, .val = val }; } else |err| switch (err) { error.NeededSourceLocation => { const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); return TypedValue{ - .ty = item.ty, + .ty = item_ty, .val = try sema.resolveConstValue(block, src, item), }; }, diff --git a/src/codegen.zig b/src/codegen.zig index bc22d7ec19..11a2603aac 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -452,6 +452,43 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, }; + const BigTomb = struct { + function: *Self, + inst: Air.Inst.Index, + tomb_bits: Liveness.Bpi, + big_tomb_bits: u32, + bit_index: usize, + + fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void { + const this_bit_index = bt.bit_index; + bt.bit_index += 1; + + const op_int = @enumToInt(op_ref); + if (op_int < Air.Inst.Ref.typed_value_map.len) return; + const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + + if (this_bit_index < Liveness.bpi - 1) { + const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0; + if (!dies) return; + } else { + const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1)); + const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0; + if (!dies) return; + } + bt.function.processDeath(op_index); + } + + fn finishAir(bt: *BigTomb, result: MCValue) void { + const is_used = !bt.function.liveness.isUnused(bt.inst); + if (is_used) { + log.debug("{} => {}", .{ bt.inst, result }); + const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result); + } + bt.function.finishAirBookkeeping(); + } + }; + const Self = @This(); fn generate( @@ -921,8 +958,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (!dies) continue; const op_int = @enumToInt(op); if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); - self.processDeath(operand); + const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; if (is_used) { @@ -2739,7 +2776,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { std.mem.copy(Air.Inst.Ref, buf[1..], args); return self.finishAir(inst, result, buf); } - @panic("TODO: codegen for function call with greater than 2 args"); + var bt = try self.iterateBigTomb(inst, 1 + args.len); + bt.feed(callee); + for (args) |arg| { + bt.feed(arg); + } + return bt.finishAir(result); } fn airRef(self: *Self, inst: Air.Inst.Index) !void { @@ -3651,7 +3693,25 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); return self.finishAir(inst, result, buf); } - @panic("TODO: codegen for asm with greater than 3 args"); + var bt = try self.iterateBigTomb(inst, outputs.len + args.len); + for (outputs) |output| { + bt.feed(output); + } + for (args) |arg| { + bt.feed(arg); + } + return bt.finishAir(result); + } + + fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb { + try self.ensureProcessDeathCapacity(operand_count + 1); + return BigTomb{ + .function = self, + .inst = inst, + .tomb_bits = self.liveness.getTombBits(inst), + .big_tomb_bits = self.liveness.special.get(inst) orelse 0, + 
.bit_index = 0, + }; } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. @@ -4492,7 +4552,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // First section of indexes correspond to a set number of constant values. const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return self.genTypedValue(Air.Inst.Ref.typed_value_map[ref_int]); + const tv = Air.Inst.Ref.typed_value_map[ref_int]; + if (!tv.ty.hasCodeGenBits()) { + return MCValue{ .none = {} }; + } + return self.genTypedValue(tv); } // If the type has no codegen bits, no need to store it. diff --git a/src/print_air.zig b/src/print_air.zig index 44c170a078..76159d0796 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -89,7 +89,7 @@ const Writer = struct { if (w.liveness.isUnused(inst)) { try s.writeAll(") unused\n"); } else { - try s.writeAll("\n"); + try s.writeAll(")\n"); } } } -- cgit v1.2.3 From a97e5e119afb80e0d6d047682b8301bab9423078 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 19 Jul 2021 18:58:51 -0700 Subject: stage2: switch: fix Sema bugs and implement AIR printing --- src/Liveness.zig | 10 +++++----- src/Sema.zig | 9 ++++++--- src/print_air.zig | 47 +++++++++++++++++++++++++++++------------------ 3 files changed, 40 insertions(+), 26 deletions(-) (limited to 'src/Liveness.zig') diff --git a/src/Liveness.zig b/src/Liveness.zig index 2c226122bf..02d0ea7bc5 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -442,9 +442,9 @@ fn analyzeInst( return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); }, .switch_br => { - const inst_data = inst_datas[inst].pl_op; - const condition = inst_data.operand; - const switch_br = a.air.extraData(Air.SwitchBr, inst_data.payload); + const pl_op = inst_datas[inst].pl_op; + const condition = pl_op.operand; + const switch_br = a.air.extraData(Air.SwitchBr, pl_op.payload); const Table = std.AutoHashMapUnmanaged(Air.Inst.Index, void); const case_tables = try gpa.alloc(Table, switch_br.data.cases_len + 1); // +1 for else @@ -456,8 +456,8 @@ fn analyzeInst( var air_extra_index: usize = switch_br.end; for (case_tables[0..switch_br.data.cases_len]) |*case_table| { const case = a.air.extraData(Air.SwitchBr.Case, air_extra_index); - const case_body = a.air.extra[case.end..][0..case.data.body_len]; - air_extra_index = case.end + case_body.len; + const case_body = a.air.extra[case.end + case.data.items_len ..][0..case.data.body_len]; + air_extra_index = case.end + case.data.items_len + case_body.len; try analyzeWithContext(a, case_table, case_body); // Reset the table back to its state from before the case. diff --git a/src/Sema.zig b/src/Sema.zig index b9449157e2..826097c3d1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4213,6 +4213,7 @@ fn analyzeSwitch( var prev_then_body: []const Air.Inst.Index = &.{}; defer gpa.free(prev_then_body); + var cases_len = scalar_cases_len; var multi_i: usize = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; @@ -4232,6 +4233,8 @@ fn analyzeSwitch( // else prong. Otherwise, we can take advantage of multiple items // mapping to the same body. 
if (ranges_len == 0) { + cases_len += 1; + const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; _ = try sema.analyzeBody(&case_block, body); @@ -4239,7 +4242,7 @@ fn analyzeSwitch( try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len + cases_extra.appendAssumeCapacity(@intCast(u32, items.len)); cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); for (items) |item_ref| { @@ -4352,12 +4355,12 @@ fn analyzeSwitch( } try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len + - cases_extra.items.len); + cases_extra.items.len + final_else_body.len); _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{ .operand = operand, .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ - .cases_len = @intCast(u32, scalar_cases_len + multi_cases_len), + .cases_len = @intCast(u32, cases_len), .else_body_len = @intCast(u32, final_else_body.len), }), } } }); diff --git a/src/print_air.zig b/src/print_air.zig index f31b307b57..51f0ce4f49 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -300,33 +300,44 @@ const Writer = struct { fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = w.air.instructions.items(.data)[inst].pl_op; - const extra = w.air.extraData(Air.SwitchBr, pl_op.payload); - const cases = w.air.extra[extra.end..][0..extra.data.cases_len]; - const else_body = w.air.extra[extra.end + cases.len ..][0..extra.data.else_body_len]; + const switch_br = w.air.extraData(Air.SwitchBr, pl_op.payload); + var extra_index: usize = switch_br.end; + var case_i: u32 = 0; try w.writeInstRef(s, pl_op.operand); - try s.writeAll(", {\n"); - const old_indent = w.indent; - if (else_body.len != 0) { - w.indent += 2; - try w.writeBody(s, else_body); - try s.writeByteNTimes(' ', old_indent); - try s.writeAll("}, {\n"); - w.indent = old_indent; - } + w.indent += 2; - for (cases) |case_index| { - const case = w.air.extraData(Air.SwitchBr.Case, case_index); - const case_body = w.air.extra[case.end..][0..case.data.body_len]; + while (case_i < switch_br.data.cases_len) : (case_i += 1) { + const case = w.air.extraData(Air.SwitchBr.Case, extra_index); + const items = @bitCast([]const Air.Inst.Ref, w.air.extra[case.end..][0..case.data.items_len]); + const case_body = w.air.extra[case.end + items.len ..][0..case.data.body_len]; + extra_index = case.end + case.data.items_len + case_body.len; + try s.writeAll(", ["); + for (items) |item, item_i| { + if (item_i != 0) try s.writeAll(", "); + try w.writeInstRef(s, item); + } + try s.writeAll("] => {\n"); w.indent += 2; try w.writeBody(s, case_body); - try s.writeByteNTimes(' ', old_indent); - try s.writeAll("}, {\n"); - w.indent = old_indent; + w.indent -= 2; + try s.writeByteNTimes(' ', w.indent); + try s.writeAll("}"); + } + + const else_body = w.air.extra[extra_index..][0..switch_br.data.else_body_len]; + if (else_body.len != 0) { + try s.writeAll(", else => {\n"); + w.indent += 2; + try w.writeBody(s, else_body); + w.indent -= 2; + try s.writeByteNTimes(' ', w.indent); + try s.writeAll("}"); } + try s.writeAll("\n"); try s.writeByteNTimes(' ', old_indent); try s.writeAll("}"); } -- cgit v1.2.3 From 91c4e28c5102223917ccf270fd466b796e0e0587 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 16:04:46 -0700 Subject: Liveness: fix br instruction not tracking its operand --- src/Liveness.zig | 5 ++++- 1 file changed, 4 
insertions(+), 1 deletion(-) (limited to 'src/Liveness.zig') diff --git a/src/Liveness.zig b/src/Liveness.zig index 02d0ea7bc5..2039dd7146 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -250,7 +250,6 @@ fn analyzeInst( .arg, .alloc, - .br, .constant, .const_ty, .breakpoint, @@ -321,6 +320,10 @@ fn analyzeInst( const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none }); }, + .br => { + const br = inst_datas[inst].br; + return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none }); + }, .assembly => { const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); const extended = a.zir.instructions.items(.data)[extra.data.zir_index].extended; -- cgit v1.2.3
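
End to end, the ExtraTombs writer added to src/Liveness.zig and the BigTomb reader added to src/codegen.zig agree on one split layout: deaths of the first bpi - 1 = 3 operands sit in the instruction's main tomb bits, and later deaths sit lsb-first in the u32 stored in the `special` table (capped at 32 operands by the assert in ExtraTombs.feed, pending the TODO there). A standalone sketch of the reader's decision, with both halves assumed already fetched; this is an illustration, not the committed code:

    const std = @import("std");
    const assert = std.debug.assert;

    const bpi = 4; // as in Liveness.zig
    const Bpi = std.meta.Int(.unsigned, bpi);

    // Illustrative only: decide death for operand `i` of a call/asm with many
    // operands. `tomb_bits` is the instruction's main 4-bit entry;
    // `big_tomb_bits` is the u32 from the `special` table.
    fn bigOperandDies(tomb_bits: Bpi, big_tomb_bits: u32, i: usize) bool {
        if (i < bpi - 1) {
            // Operands 0..2 use the main tomb bits.
            return @truncate(u1, tomb_bits >> @intCast(u2, i)) != 0;
        }
        // Operand 3 maps to the lsb of the extra word, and so on upward.
        const big_bit_index = @intCast(u5, i - (bpi - 1));
        return @truncate(u1, big_tomb_bits >> big_bit_index) != 0;
    }

    pub fn main() void {
        // Hypothetical call with 6 operands where operands 1 and 4 die.
        const tomb: Bpi = 0b0010; // bit 1: operand 1; the msb is the unused flag
        const big: u32 = 0b010; // bit 1: operand 4 (4 - 3 == 1)
        assert(!bigOperandDies(tomb, big, 0));
        assert(bigOperandDies(tomb, big, 1));
        assert(!bigOperandDies(tomb, big, 3));
        assert(bigOperandDies(tomb, big, 4));
    }

Splitting this way means isUnused and the three common operand checks never touch the hash map; a backend only consults `special.get` (defaulting to 0, as iterateBigTomb does above) for instructions that can carry more than three operands.
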