Diffstat (limited to 'src')
-rw-r--r--  src/Air.zig                    214
-rw-r--r--  src/Compilation.zig              3
-rw-r--r--  src/Liveness.zig              1841
-rw-r--r--  src/Liveness/Verify.zig        610
-rw-r--r--  src/Module.zig                  33
-rw-r--r--  src/arch/aarch64/CodeGen.zig    11
-rw-r--r--  src/arch/arm/CodeGen.zig        11
-rw-r--r--  src/arch/riscv64/CodeGen.zig     5
-rw-r--r--  src/arch/sparc64/CodeGen.zig    11
-rw-r--r--  src/arch/wasm/CodeGen.zig       99
-rw-r--r--  src/arch/x86_64/CodeGen.zig   3160
-rw-r--r--  src/codegen/c.zig              532
-rw-r--r--  src/codegen/llvm.zig           193
-rw-r--r--  src/codegen/spirv.zig            5
-rw-r--r--  src/print_air.zig              103
-rw-r--r--  src/register_manager.zig         4
16 files changed, 3508 insertions, 3327 deletions
diff --git a/src/Air.zig b/src/Air.zig
index b8771b2a61..19ba576a5f 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -1375,3 +1375,217 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 {
}
return bytes[0..end :0];
}
+
+/// Returns whether the given instruction must always be lowered, for instance because it can cause
+/// side effects. If an instruction does not need to be lowered, and Liveness determines its result
+/// is unused, backends should avoid lowering it.
+pub fn mustLower(air: Air, inst: Air.Inst.Index) bool {
+ const data = air.instructions.items(.data)[inst];
+ return switch (air.instructions.items(.tag)[inst]) {
+ .arg,
+ .block,
+ .loop,
+ .br,
+ .trap,
+ .breakpoint,
+ .call,
+ .call_always_tail,
+ .call_never_tail,
+ .call_never_inline,
+ .cond_br,
+ .switch_br,
+ .@"try",
+ .try_ptr,
+ .dbg_stmt,
+ .dbg_block_begin,
+ .dbg_block_end,
+ .dbg_inline_begin,
+ .dbg_inline_end,
+ .dbg_var_ptr,
+ .dbg_var_val,
+ .ret,
+ .ret_load,
+ .store,
+ .unreach,
+ .optional_payload_ptr_set,
+ .errunion_payload_ptr_set,
+ .set_union_tag,
+ .memset,
+ .memcpy,
+ .cmpxchg_weak,
+ .cmpxchg_strong,
+ .fence,
+ .atomic_store_unordered,
+ .atomic_store_monotonic,
+ .atomic_store_release,
+ .atomic_store_seq_cst,
+ .atomic_rmw,
+ .prefetch,
+ .wasm_memory_grow,
+ .set_err_return_trace,
+ .vector_store_elem,
+ .c_va_arg,
+ .c_va_copy,
+ .c_va_end,
+ .c_va_start,
+ => true,
+
+ .add,
+ .add_optimized,
+ .addwrap,
+ .addwrap_optimized,
+ .add_sat,
+ .sub,
+ .sub_optimized,
+ .subwrap,
+ .subwrap_optimized,
+ .sub_sat,
+ .mul,
+ .mul_optimized,
+ .mulwrap,
+ .mulwrap_optimized,
+ .mul_sat,
+ .div_float,
+ .div_float_optimized,
+ .div_trunc,
+ .div_trunc_optimized,
+ .div_floor,
+ .div_floor_optimized,
+ .div_exact,
+ .div_exact_optimized,
+ .rem,
+ .rem_optimized,
+ .mod,
+ .mod_optimized,
+ .ptr_add,
+ .ptr_sub,
+ .max,
+ .min,
+ .add_with_overflow,
+ .sub_with_overflow,
+ .mul_with_overflow,
+ .shl_with_overflow,
+ .alloc,
+ .ret_ptr,
+ .bit_and,
+ .bit_or,
+ .shr,
+ .shr_exact,
+ .shl,
+ .shl_exact,
+ .shl_sat,
+ .xor,
+ .not,
+ .bitcast,
+ .ret_addr,
+ .frame_addr,
+ .clz,
+ .ctz,
+ .popcount,
+ .byte_swap,
+ .bit_reverse,
+ .sqrt,
+ .sin,
+ .cos,
+ .tan,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ .neg,
+ .neg_optimized,
+ .cmp_lt,
+ .cmp_lt_optimized,
+ .cmp_lte,
+ .cmp_lte_optimized,
+ .cmp_eq,
+ .cmp_eq_optimized,
+ .cmp_gte,
+ .cmp_gte_optimized,
+ .cmp_gt,
+ .cmp_gt_optimized,
+ .cmp_neq,
+ .cmp_neq_optimized,
+ .cmp_vector,
+ .cmp_vector_optimized,
+ .constant,
+ .const_ty,
+ .is_null,
+ .is_non_null,
+ .is_null_ptr,
+ .is_non_null_ptr,
+ .is_err,
+ .is_non_err,
+ .is_err_ptr,
+ .is_non_err_ptr,
+ .bool_and,
+ .bool_or,
+ .ptrtoint,
+ .bool_to_int,
+ .fptrunc,
+ .fpext,
+ .intcast,
+ .trunc,
+ .optional_payload,
+ .optional_payload_ptr,
+ .wrap_optional,
+ .unwrap_errunion_payload,
+ .unwrap_errunion_err,
+ .unwrap_errunion_payload_ptr,
+ .unwrap_errunion_err_ptr,
+ .wrap_errunion_payload,
+ .wrap_errunion_err,
+ .struct_field_ptr,
+ .struct_field_ptr_index_0,
+ .struct_field_ptr_index_1,
+ .struct_field_ptr_index_2,
+ .struct_field_ptr_index_3,
+ .struct_field_val,
+ .get_union_tag,
+ .slice,
+ .slice_len,
+ .slice_ptr,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
+ .array_elem_val,
+ .slice_elem_ptr,
+ .ptr_elem_ptr,
+ .array_to_slice,
+ .float_to_int,
+ .float_to_int_optimized,
+ .int_to_float,
+ .reduce,
+ .reduce_optimized,
+ .splat,
+ .shuffle,
+ .select,
+ .is_named_enum_value,
+ .tag_name,
+ .error_name,
+ .error_set_has_value,
+ .aggregate_init,
+ .union_init,
+ .mul_add,
+ .field_parent_ptr,
+ .wasm_memory_size,
+ .cmp_lt_errors_len,
+ .err_return_trace,
+ .addrspace_cast,
+ .save_err_return_trace_index,
+ .work_item_id,
+ .work_group_size,
+ .work_group_id,
+ => false,
+
+ .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0,
+ .load => air.typeOf(data.ty_op.operand).isVolatilePtr(),
+ .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs).isVolatilePtr(),
+ .atomic_load => air.typeOf(data.atomic_load.ptr).isVolatilePtr(),
+ };
+}
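
The consuming pattern this enables in backends: before lowering an instruction, check whether
Liveness marked its result unused and whether it must be lowered anyway. A minimal sketch in Zig
(hypothetical `Self` and `lowerInst`; `isUnused` is Liveness's existing accessor):

    fn genBody(self: *Self, body: []const Air.Inst.Index) !void {
        for (body) |inst| {
            // Skip lowering entirely when the result is unused, unless the
            // instruction has observable effects (see mustLower above).
            if (self.liveness.isUnused(inst) and !self.air.mustLower(inst))
                continue;
            try self.lowerInst(inst);
        }
    }
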
diff --git a/src/Compilation.zig b/src/Compilation.zig
index a07ac417e3..00b691f780 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -3095,6 +3095,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
.file_failure,
.sema_failure,
+ .liveness_failure,
.codegen_failure,
.dependency_failure,
.sema_failure_retryable,
@@ -3145,7 +3146,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
// emit-h only requires semantic analysis of the Decl to be complete,
// it does not depend on machine code generation to succeed.
- .codegen_failure, .codegen_failure_retryable, .complete => {
+ .liveness_failure, .codegen_failure, .codegen_failure_retryable, .complete => {
const named_frame = tracy.namedFrame("emit_h_decl");
defer named_frame.end();
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 6412792037..6c0a799476 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -14,6 +14,8 @@ const Allocator = std.mem.Allocator;
const Air = @import("Air.zig");
const Log2Int = std.math.Log2Int;
+pub const Verify = @import("Liveness/Verify.zig");
+
/// This array is split into sets of 4 bits per AIR instruction.
/// The MSB (0bX000) is whether the instruction is unreferenced.
/// The LSB (0b000X) is the first operand, and so on, up to 3 operands. A set bit means the
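
The 4-bit tomb groups described above are decoded as in the existing `operandDies` accessor; a
sketch of that lookup, assuming `bpi == 4` and this file's `OperandInt`/`Log2Int` helpers:

    pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool {
        assert(operand < bpi - 1);
        // Each usize element packs @bitSizeOf(usize) / bpi instructions.
        const usize_index = (inst * bpi) / @bitSizeOf(usize);
        const mask = @as(usize, 1) <<
            @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand);
        return (l.tomb_bits[usize_index] & mask) != 0;
    }
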
@@ -24,8 +26,10 @@ tomb_bits: []usize,
/// Sparse table of specially handled instructions. The value is an index into the `extra`
/// array. The meaning of the data depends on the AIR tag.
/// * `cond_br` - points to a `CondBr` in `extra` at this index.
+/// * `try`, `try_ptr` - points to a `CondBr` in `extra` at this index. The error path (the block
+/// in the instruction) is considered the "else" path, and the rest of the block the "then".
/// * `switch_br` - points to a `SwitchBr` in `extra` at this index.
-/// * `loop` - points to a `Loop` in `extra` at this index.
+/// * `block` - points to a `Block` in `extra` at this index.
/// * `asm`, `call`, `aggregate_init` - the value is a set of bits which are the extra tomb
/// bits of operands.
/// The main tomb bits are still used and the extra ones are starting with the lsb of the
@@ -52,11 +56,88 @@ pub const SwitchBr = struct {
else_death_count: u32,
};
-/// Trailing is the set of instructions whose lifetimes end at the end of the loop body.
-pub const Loop = struct {
+/// Trailing is the set of instructions which die in the block. Note that these are not additional
+/// deaths (they are all recorded as normal within the block), but backends may use this information
+/// as a more efficient way to track which instructions are still alive after a block.
+pub const Block = struct {
death_count: u32,
};
+/// Liveness analysis runs in several passes. Each pass iterates backwards over instructions in
+/// bodies, and recurses into bodies.
+const LivenessPass = enum {
+ /// In this pass, we perform some basic analysis of loops to gain information the main pass
+ /// needs. In particular, for every `loop`, we track the following information:
+ /// * Every block which the loop body contains a `br` to.
+ /// * Every operand referenced within the loop body but created outside the loop.
+ /// This gives the main analysis pass enough information to determine the full set of
+ /// instructions which need to be alive when a loop repeats. This data is TEMPORARILY stored in
+ /// `a.extra`. It is not re-added to `extra` by the main pass, since it is not useful to
+ /// backends.
+ loop_analysis,
+
+ /// This pass performs the main liveness analysis, setting up tombs and extra data while
+ /// considering control flow etc.
+ main_analysis,
+};
+
+/// Each analysis pass may wish to pass data through calls. A pointer to a `LivenessPassData(pass)`
+/// stored on the stack is passed through calls to `analyzeInst` etc.
+fn LivenessPassData(comptime pass: LivenessPass) type {
+ return switch (pass) {
+ .loop_analysis => struct {
+ /// The set of blocks which are exited with a `br` instruction at some point within this
+ /// body and which we are currently within.
+ breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
+
+ /// The set of operands for which we have seen at least one usage but not their birth.
+ live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
+
+ fn deinit(self: *@This(), gpa: Allocator) void {
+ self.breaks.deinit(gpa);
+ self.live_set.deinit(gpa);
+ }
+ },
+
+ .main_analysis => struct {
+ /// Every `block` currently under analysis.
+ block_scopes: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockScope) = .{},
+
+ /// The set of deaths which should be made to occur at the earliest possible point in
+ /// this control flow branch. These instructions die when they are last referenced in
+ /// the current branch; if unreferenced, they die at the start of the branch. Populated
+ /// when a `br` instruction is reached. If deaths are common to all branches of control
+ /// flow, they may be bubbled up to the parent branch.
+ branch_deaths: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
+
+ /// The set of instructions currently alive. Instructions which must die in this branch
+ /// (i.e. those in `branch_deaths`) are not in this set, because they must die before
+ /// this point.
+ live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
+
+ /// The extra data initialized by the `loop_analysis` pass for this pass to consume.
+ /// Owned by this struct during this pass.
+ old_extra: std.ArrayListUnmanaged(u32) = .{},
+
+ const BlockScope = struct {
+ /// The set of instructions which are alive upon a `br` to this block.
+ live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+ };
+
+ fn deinit(self: *@This(), gpa: Allocator) void {
+ var it = self.block_scopes.valueIterator();
+ while (it.next()) |block| {
+ block.live_set.deinit(gpa);
+ }
+ self.block_scopes.deinit(gpa);
+ self.branch_deaths.deinit(gpa);
+ self.live_set.deinit(gpa);
+ self.old_extra.deinit(gpa);
+ }
+ },
+ };
+}
+
pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness {
const tracy = trace(@src());
defer tracy.end();
@@ -64,7 +145,6 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness {
var a: Analysis = .{
.gpa = gpa,
.air = air,
- .table = .{},
.tomb_bits = try gpa.alloc(
usize,
(air.instructions.len * bpi + @bitSizeOf(usize) - 1) / @bitSizeOf(usize),
@@ -75,19 +155,27 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness {
errdefer gpa.free(a.tomb_bits);
errdefer a.special.deinit(gpa);
defer a.extra.deinit(gpa);
- defer a.table.deinit(gpa);
std.mem.set(usize, a.tomb_bits, 0);
const main_body = air.getMainBody();
- try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len));
- try analyzeWithContext(&a, null, main_body);
+
+ {
+ var data: LivenessPassData(.loop_analysis) = .{};
+ defer data.deinit(gpa);
+ try analyzeBody(&a, .loop_analysis, &data, main_body);
+ }
+
{
- var to_remove: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
- defer to_remove.deinit(gpa);
- try removeDeaths(&a, &to_remove, main_body);
+ var data: LivenessPassData(.main_analysis) = .{};
+ defer data.deinit(gpa);
+ data.old_extra = a.extra;
+ a.extra = .{};
+ try analyzeBody(&a, .main_analysis, &data, main_body);
+ assert(data.branch_deaths.count() == 0);
}
- return Liveness{
+
+ return .{
.tomb_bits = a.tomb_bits,
.special = a.special,
.extra = try a.extra.toOwnedSlice(gpa),
@@ -661,18 +749,27 @@ pub fn getSwitchBr(l: Liveness, gpa: Allocator, inst: Air.Inst.Index, cases_len:
};
}
-pub const LoopSlice = struct {
+/// Note that this information is technically redundant, but is useful for
+/// backends nonetheless: see `Block`.
+pub const BlockSlices = struct {
deaths: []const Air.Inst.Index,
};
-pub fn getLoop(l: Liveness, inst: Air.Inst.Index) LoopSlice {
+pub fn getBlock(l: Liveness, inst: Air.Inst.Index) BlockSlices {
const index: usize = l.special.get(inst) orelse return .{
.deaths = &.{},
};
const death_count = l.extra[index];
- return .{ .deaths = l.extra[index + 1 ..][0..death_count] };
+ const deaths = l.extra[index + 1 ..][0..death_count];
+ return .{
+ .deaths = deaths,
+ };
}
+pub const LoopSlice = struct {
+ deaths: []const Air.Inst.Index,
+};
+
pub fn deinit(l: *Liveness, gpa: Allocator) void {
gpa.free(l.tomb_bits);
gpa.free(l.extra);
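
The consuming side of the `getBlock` accessor added above looks roughly like this in a backend
(hypothetical `processDeath` bookkeeping helper):

    // After lowering a block, retire everything Liveness recorded as dying
    // inside it, so value tracking only carries truly-live instructions.
    const block_deaths = self.liveness.getBlock(inst).deaths;
    for (block_deaths) |dead_inst| {
        self.processDeath(dead_inst);
    }
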
@@ -687,6 +784,7 @@ pub fn iterateBigTomb(l: Liveness, inst: Air.Inst.Index) BigTomb {
.extra_offset = 0,
.extra = l.extra,
.bit_index = 0,
+ .reached_end = false,
};
}
@@ -702,13 +800,16 @@ pub const BigTomb = struct {
extra_start: u32,
extra_offset: u32,
extra: []const u32,
+ reached_end: bool,
/// Returns whether the next operand dies.
pub fn feed(bt: *BigTomb) bool {
+ if (bt.reached_end) return false;
+
const this_bit_index = bt.bit_index;
bt.bit_index += 1;
- const small_tombs = Liveness.bpi - 1;
+ const small_tombs = bpi - 1;
if (this_bit_index < small_tombs) {
const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
return dies;
@@ -716,6 +817,10 @@ pub const BigTomb = struct {
const big_bit_index = this_bit_index - small_tombs;
while (big_bit_index - bt.extra_offset * 31 >= 31) {
+ if (@truncate(u1, bt.extra[bt.extra_start + bt.extra_offset] >> 31) != 0) {
+ bt.reached_end = true;
+ return false;
+ }
bt.extra_offset += 1;
}
const dies = @truncate(u1, bt.extra[bt.extra_start + bt.extra_offset] >>
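
With the `reached_end` guard above, `feed` reports `false` once the terminal extra element is
reached, so a backend can feed a long instruction's operands without tracking the count itself.
A sketch for a call (hypothetical `processDeath` helper again):

    var bt = self.liveness.iterateBigTomb(inst);
    // One feed() per operand, in the order the operands were recorded.
    if (bt.feed()) {
        if (Air.refToIndex(callee)) |callee_inst| self.processDeath(callee_inst);
    }
    for (args) |arg| {
        if (bt.feed()) {
            if (Air.refToIndex(arg)) |arg_inst| self.processDeath(arg_inst);
        }
    }
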
@@ -728,7 +833,6 @@ pub const BigTomb = struct {
const Analysis = struct {
gpa: Allocator,
air: Air,
- table: std.AutoHashMapUnmanaged(Air.Inst.Index, void),
tomb_bits: []usize,
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
extra: std.ArrayListUnmanaged(u32),
@@ -758,46 +862,70 @@ const Analysis = struct {
}
};
-fn analyzeWithContext(
+fn analyzeBody(
a: *Analysis,
- new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
body: []const Air.Inst.Index,
) Allocator.Error!void {
var i: usize = body.len;
+ while (i != 0) {
+ i -= 1;
+ const inst = body[i];
+ try analyzeInst(a, pass, data, inst);
+ }
+}
- if (new_set) |ns| {
- // We are only interested in doing this for instructions which are born
- // before a conditional branch, so after obtaining the new set for
- // each branch we prune the instructions which were born within.
- while (i != 0) {
- i -= 1;
- const inst = body[i];
- _ = ns.remove(inst);
- try analyzeInst(a, new_set, inst);
- }
- } else {
- while (i != 0) {
- i -= 1;
- const inst = body[i];
- try analyzeInst(a, new_set, inst);
- }
+const ControlBranchInfo = struct {
+ branch_deaths: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
+ live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
+};
+
+/// Helper function for running `analyzeBody`, but which restores `branch_deaths` and `live_set`
+/// to their original states before returning, handing back the modified versions of them. Only
+/// makes sense in the `main_analysis` pass.
+fn analyzeBodyResetBranch(
+ a: *Analysis,
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
+ body: []const Air.Inst.Index,
+) !ControlBranchInfo {
+ switch (pass) {
+ .main_analysis => {},
+ else => @compileError("Liveness.analyzeBodyResetBranch only makes sense in LivenessPass.main_analysis"),
+ }
+
+ const gpa = a.gpa;
+
+ const old_branch_deaths = try data.branch_deaths.clone(a.gpa);
+ defer {
+ data.branch_deaths.deinit(gpa);
+ data.branch_deaths = old_branch_deaths;
}
+
+ const old_live_set = try data.live_set.clone(a.gpa);
+ defer {
+ data.live_set.deinit(gpa);
+ data.live_set = old_live_set;
+ }
+
+ try analyzeBody(a, pass, data, body);
+
+ return .{
+ .branch_deaths = data.branch_deaths.move(),
+ .live_set = data.live_set.move(),
+ };
}
fn analyzeInst(
a: *Analysis,
- new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
inst: Air.Inst.Index,
) Allocator.Error!void {
- const gpa = a.gpa;
- const table = &a.table;
const inst_tags = a.air.instructions.items(.tag);
const inst_datas = a.air.instructions.items(.data);
- // No tombstone for this instruction means it is never referenced,
- // and its birth marks its own death. Very metal 🤘
- const main_tomb = !table.contains(inst);
-
switch (inst_tags[inst]) {
.add,
.add_optimized,
@@ -861,28 +989,24 @@ fn analyzeInst(
.max,
=> {
const o = inst_datas[inst].bin_op;
- return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none });
+ return analyzeOperands(a, pass, data, inst, .{ o.lhs, o.rhs, .none });
},
.vector_store_elem => {
const o = inst_datas[inst].vector_store_elem;
const extra = a.air.extraData(Air.Bin, o.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ o.vector_ptr, extra.lhs, extra.rhs });
+ return analyzeOperands(a, pass, data, inst, .{ o.vector_ptr, extra.lhs, extra.rhs });
},
.arg,
.alloc,
.ret_ptr,
- .constant,
- .const_ty,
- .trap,
.breakpoint,
.dbg_stmt,
.dbg_inline_begin,
.dbg_inline_end,
.dbg_block_begin,
.dbg_block_end,
- .unreach,
.fence,
.ret_addr,
.frame_addr,
@@ -893,7 +1017,15 @@ fn analyzeInst(
.work_item_id,
.work_group_size,
.work_group_id,
- => return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }),
+ => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }),
+
+ .constant,
+ .const_ty,
+ => unreachable,
+
+ .trap,
+ .unreach,
+ => return analyzeFuncEnd(a, pass, data, inst, .{ .none, .none, .none }),
.not,
.bitcast,
@@ -938,7 +1070,7 @@ fn analyzeInst(
.c_va_copy,
=> {
const o = inst_datas[inst].ty_op;
- return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });
+ return analyzeOperands(a, pass, data, inst, .{ o.operand, .none, .none });
},
.is_null,
@@ -951,8 +1083,6 @@ fn analyzeInst(
.is_non_err_ptr,
.ptrtoint,
.bool_to_int,
- .ret,
- .ret_load,
.is_named_enum_value,
.tag_name,
.error_name,
@@ -977,7 +1107,14 @@ fn analyzeInst(
.c_va_end,
=> {
const operand = inst_datas[inst].un_op;
- return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
+ return analyzeOperands(a, pass, data, inst, .{ operand, .none, .none });
+ },
+
+ .ret,
+ .ret_load,
+ => {
+ const operand = inst_datas[inst].un_op;
+ return analyzeFuncEnd(a, pass, data, inst, .{ operand, .none, .none });
},
.add_with_overflow,
@@ -992,19 +1129,19 @@ fn analyzeInst(
=> {
const ty_pl = inst_datas[inst].ty_pl;
const extra = a.air.extraData(Air.Bin, ty_pl.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
+ return analyzeOperands(a, pass, data, inst, .{ extra.lhs, extra.rhs, .none });
},
.dbg_var_ptr,
.dbg_var_val,
=> {
const operand = inst_datas[inst].pl_op.operand;
- return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
+ return analyzeOperands(a, pass, data, inst, .{ operand, .none, .none });
},
.prefetch => {
const prefetch = inst_datas[inst].prefetch;
- return trackOperands(a, new_set, inst, main_tomb, .{ prefetch.ptr, .none, .none });
+ return analyzeOperands(a, pass, data, inst, .{ prefetch.ptr, .none, .none });
},
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
@@ -1016,37 +1153,35 @@ fn analyzeInst(
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
buf[0] = callee;
std.mem.copy(Air.Inst.Ref, buf[1..], args);
- return trackOperands(a, new_set, inst, main_tomb, buf);
+ return analyzeOperands(a, pass, data, inst, buf);
}
- var extra_tombs: ExtraTombs = .{
- .analysis = a,
- .new_set = new_set,
- .inst = inst,
- .main_tomb = main_tomb,
- };
- defer extra_tombs.deinit();
- try extra_tombs.feed(callee);
- for (args) |arg| {
- try extra_tombs.feed(arg);
+
+ var big = try AnalyzeBigOperands(pass).init(a, data, inst, args.len + 1);
+ defer big.deinit();
+ var i: usize = args.len;
+ while (i > 0) {
+ i -= 1;
+ try big.feed(args[i]);
}
- return extra_tombs.finish();
+ try big.feed(callee);
+ return big.finish();
},
.select => {
const pl_op = inst_datas[inst].pl_op;
const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs });
+ return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, extra.lhs, extra.rhs });
},
.shuffle => {
const extra = a.air.extraData(Air.Shuffle, inst_datas[inst].ty_pl.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ extra.a, extra.b, .none });
+ return analyzeOperands(a, pass, data, inst, .{ extra.a, extra.b, .none });
},
.reduce, .reduce_optimized => {
const reduce = inst_datas[inst].reduce;
- return trackOperands(a, new_set, inst, main_tomb, .{ reduce.operand, .none, .none });
+ return analyzeOperands(a, pass, data, inst, .{ reduce.operand, .none, .none });
},
.cmp_vector, .cmp_vector_optimized => {
const extra = a.air.extraData(Air.VectorCmp, inst_datas[inst].ty_pl.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
+ return analyzeOperands(a, pass, data, inst, .{ extra.lhs, extra.rhs, .none });
},
.aggregate_init => {
const ty_pl = inst_datas[inst].ty_pl;
@@ -1057,62 +1192,58 @@ fn analyzeInst(
if (elements.len <= bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
std.mem.copy(Air.Inst.Ref, &buf, elements);
- return trackOperands(a, new_set, inst, main_tomb, buf);
+ return analyzeOperands(a, pass, data, inst, buf);
}
- var extra_tombs: ExtraTombs = .{
- .analysis = a,
- .new_set = new_set,
- .inst = inst,
- .main_tomb = main_tomb,
- };
- defer extra_tombs.deinit();
- for (elements) |elem| {
- try extra_tombs.feed(elem);
+
+ var big = try AnalyzeBigOperands(pass).init(a, data, inst, elements.len);
+ defer big.deinit();
+ var i: usize = elements.len;
+ while (i > 0) {
+ i -= 1;
+ try big.feed(elements[i]);
}
- return extra_tombs.finish();
+ return big.finish();
},
.union_init => {
const extra = a.air.extraData(Air.UnionInit, inst_datas[inst].ty_pl.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ extra.init, .none, .none });
+ return analyzeOperands(a, pass, data, inst, .{ extra.init, .none, .none });
},
.struct_field_ptr, .struct_field_val => {
const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_operand, .none, .none });
+ return analyzeOperands(a, pass, data, inst, .{ extra.struct_operand, .none, .none });
},
.field_parent_ptr => {
const extra = a.air.extraData(Air.FieldParentPtr, inst_datas[inst].ty_pl.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ extra.field_ptr, .none, .none });
+ return analyzeOperands(a, pass, data, inst, .{ extra.field_ptr, .none, .none });
},
.cmpxchg_strong, .cmpxchg_weak => {
const extra = a.air.extraData(Air.Cmpxchg, inst_datas[inst].ty_pl.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ extra.ptr, extra.expected_value, extra.new_value });
+ return analyzeOperands(a, pass, data, inst, .{ extra.ptr, extra.expected_value, extra.new_value });
},
.mul_add => {
const pl_op = inst_datas[inst].pl_op;
const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, pl_op.operand });
+ return analyzeOperands(a, pass, data, inst, .{ extra.lhs, extra.rhs, pl_op.operand });
},
.atomic_load => {
const ptr = inst_datas[inst].atomic_load.ptr;
- return trackOperands(a, new_set, inst, main_tomb, .{ ptr, .none, .none });
+ return analyzeOperands(a, pass, data, inst, .{ ptr, .none, .none });
},
.atomic_rmw => {
const pl_op = inst_datas[inst].pl_op;
const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none });
+ return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, extra.operand, .none });
},
.memset,
.memcpy,
=> {
const pl_op = inst_datas[inst].pl_op;
const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
- return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs });
+ return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, extra.lhs, extra.rhs });
},
- .br => {
- const br = inst_datas[inst].br;
- return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none });
- },
+ .br => return analyzeInstBr(a, pass, data, inst),
+
.assembly => {
const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload);
var extra_i: usize = extra.end;
@@ -1121,912 +1252,896 @@ fn analyzeInst(
const inputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
- simple: {
+ const num_operands = simple: {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
var buf_index: usize = 0;
for (outputs) |output| {
if (output != .none) {
- if (buf_index >= buf.len) break :simple;
- buf[buf_index] = output;
+ if (buf_index < buf.len) buf[buf_index] = output;
buf_index += 1;
}
}
- if (buf_index + inputs.len > buf.len) break :simple;
+ if (buf_index + inputs.len > buf.len) {
+ break :simple buf_index + inputs.len;
+ }
std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
- return trackOperands(a, new_set, inst, main_tomb, buf);
- }
- var extra_tombs: ExtraTombs = .{
- .analysis = a,
- .new_set = new_set,
- .inst = inst,
- .main_tomb = main_tomb,
+ return analyzeOperands(a, pass, data, inst, buf);
};
- defer extra_tombs.deinit();
- for (outputs) |output| {
- if (output != .none) {
- try extra_tombs.feed(output);
- }
+
+ var big = try AnalyzeBigOperands(pass).init(a, data, inst, num_operands);
+ defer big.deinit();
+ var i: usize = inputs.len;
+ while (i > 0) {
+ i -= 1;
+ try big.feed(inputs[i]);
}
- for (inputs) |input| {
- try extra_tombs.feed(input);
+ i = outputs.len;
+ while (i > 0) {
+ i -= 1;
+ if (outputs[i] != .none) {
+ try big.feed(outputs[i]);
+ }
}
- return extra_tombs.finish();
+ return big.finish();
},
- .block => {
- const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
- const body = a.air.extra[extra.end..][0..extra.data.body_len];
- try analyzeWithContext(a, new_set, body);
- return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none });
+
+ .block => return analyzeInstBlock(a, pass, data, inst),
+ .loop => return analyzeInstLoop(a, pass, data, inst),
+
+ .@"try" => return analyzeInstCondBr(a, pass, data, inst, .@"try"),
+ .try_ptr => return analyzeInstCondBr(a, pass, data, inst, .try_ptr),
+ .cond_br => return analyzeInstCondBr(a, pass, data, inst, .cond_br),
+ .switch_br => return analyzeInstSwitchBr(a, pass, data, inst),
+
+ .wasm_memory_grow => {
+ const pl_op = inst_datas[inst].pl_op;
+ return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, .none, .none });
},
- .loop => {
- const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
- const body = a.air.extra[extra.end..][0..extra.data.body_len];
+ }
+}
- var body_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
- defer body_table.deinit(gpa);
+/// Every instruction should hit this (after handling any nested bodies), in every pass. In the
+/// main pass, it is responsible for marking deaths of the (first three) operands and noticing
+/// immediate deaths.
+fn analyzeOperands(
+ a: *Analysis,
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
+ inst: Air.Inst.Index,
+ operands: [bpi - 1]Air.Inst.Ref,
+) Allocator.Error!void {
+ const gpa = a.gpa;
+ const inst_tags = a.air.instructions.items(.tag);
- // Instructions outside the loop body cannot die within the loop, since further loop
- // iterations may occur. Track deaths from the loop body - we'll remove all of these
- // retroactively, and add them to our extra data.
+ switch (pass) {
+ .loop_analysis => {
+ _ = data.live_set.remove(inst);
- try analyzeWithContext(a, &body_table, body);
+ for (operands) |op_ref| {
+ const operand = Air.refToIndex(op_ref) orelse continue;
- if (new_set) |ns| {
- try ns.ensureUnusedCapacity(gpa, body_table.count());
- var it = body_table.keyIterator();
- while (it.next()) |key| {
- _ = ns.putAssumeCapacity(key.*, {});
+ // Don't compute any liveness for constants
+ switch (inst_tags[operand]) {
+ .constant, .const_ty => continue,
+ else => {},
}
+
+ _ = try data.live_set.put(gpa, operand, {});
}
+ },
- try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(Loop).len + body_table.count());
- const extra_index = a.addExtraAssumeCapacity(Loop{
- .death_count = body_table.count(),
- });
- {
- var it = body_table.keyIterator();
- while (it.next()) |key| {
- a.extra.appendAssumeCapacity(key.*);
- }
+ .main_analysis => {
+ const usize_index = (inst * bpi) / @bitSizeOf(usize);
+
+ // This logic must synchronize with `will_die_immediately` in `AnalyzeBigOperands.init`.
+ var immediate_death = false;
+ if (data.branch_deaths.remove(inst)) {
+ log.debug("[{}] %{}: resolved branch death to birth (immediate death)", .{ pass, inst });
+ immediate_death = true;
+ assert(!data.live_set.contains(inst));
+ } else if (data.live_set.remove(inst)) {
+ log.debug("[{}] %{}: removed from live set", .{ pass, inst });
+ } else {
+ log.debug("[{}] %{}: immediate death", .{ pass, inst });
+ immediate_death = true;
}
- try a.special.put(gpa, inst, extra_index);
- // We'll remove invalid deaths in a separate pass after main liveness analysis. See
- // removeDeaths for more details.
+ var tomb_bits: Bpi = @as(Bpi, @boolToInt(immediate_death)) << (bpi - 1);
+
+ // If our result is unused and the instruction doesn't need to be lowered, backends will
+ // skip the lowering of this instruction, so we don't want to record uses of operands.
+ // That way, we can mark as many instructions as possible unused.
+ if (!immediate_death or a.air.mustLower(inst)) {
+ // Note that it's important we iterate over the operands backwards, so that if a dying
+ // operand is used multiple times we mark its last use as its death.
+ var i = operands.len;
+ while (i > 0) {
+ i -= 1;
+ const op_ref = operands[i];
+ const operand = Air.refToIndex(op_ref) orelse continue;
+
+ // Don't compute any liveness for constants
+ switch (inst_tags[operand]) {
+ .constant, .const_ty => continue,
+ else => {},
+ }
- return; // Loop has no operands and it is always unreferenced.
- },
- .@"try" => {
- const pl_op = inst_datas[inst].pl_op;
- const extra = a.air.extraData(Air.Try, pl_op.payload);
- const body = a.air.extra[extra.end..][0..extra.data.body_len];
- try analyzeWithContext(a, new_set, body);
- return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, .none, .none });
+ const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
+
+ if ((try data.live_set.fetchPut(gpa, operand, {})) == null) {
+ log.debug("[{}] %{}: added %{} to live set (operand dies here)", .{ pass, inst, operand });
+ tomb_bits |= mask;
+ if (data.branch_deaths.remove(operand)) {
+ log.debug("[{}] %{}: resolved branch death of %{} to this usage", .{ pass, inst, operand });
+ }
+ }
+ }
+ }
+
+ a.tomb_bits[usize_index] |= @as(usize, tomb_bits) <<
+ @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi);
},
- .try_ptr => {
- const extra = a.air.extraData(Air.TryPtr, inst_datas[inst].ty_pl.payload);
- const body = a.air.extra[extra.end..][0..extra.data.body_len];
- try analyzeWithContext(a, new_set, body);
- return trackOperands(a, new_set, inst, main_tomb, .{ extra.data.ptr, .none, .none });
+ }
+}
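// Illustration (not part of this change): with 64-bit usize and bpi == 4, each
// tomb_bits element holds 16 instructions. For inst == 37, the write above
// targets usize_index == (37 * 4) / 64 == 2 at bit offset (37 % 16) * 4 == 20,
// so %37's tomb bits occupy bits 20..23 of tomb_bits[2], with bit 23 (the
// group's MSB) meaning "result unreferenced".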
+
+/// Like `analyzeOperands`, but for an instruction which returns from a function, so should
+/// effectively kill every remaining live value other than its operands.
+fn analyzeFuncEnd(
+ a: *Analysis,
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
+ inst: Air.Inst.Index,
+ operands: [bpi - 1]Air.Inst.Ref,
+) Allocator.Error!void {
+ switch (pass) {
+ .loop_analysis => {
+ // No operands need to be alive if we're returning from the function, so we don't need
+ // to touch `breaks` here even though this is sort of like a break to the top level.
},
- .cond_br => {
- // Each death that occurs inside one branch, but not the other, needs
- // to be added as a death immediately upon entering the other branch.
- const inst_data = inst_datas[inst].pl_op;
- const condition = inst_data.operand;
- const extra = a.air.extraData(Air.CondBr, inst_data.payload);
- const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len];
- const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
- defer then_table.deinit(gpa);
- try analyzeWithContext(a, &then_table, then_body);
+ .main_analysis => {
+ const gpa = a.gpa;
- // Reset the table back to its state from before the branch.
- {
- var it = then_table.keyIterator();
- while (it.next()) |key| {
- assert(table.remove(key.*));
- }
+ // Note that we preserve previous branch deaths - anything that needs to die in our
+ // "parent" branch also needs to die for us.
+
+ try data.branch_deaths.ensureUnusedCapacity(gpa, data.live_set.count());
+ var it = data.live_set.keyIterator();
+ while (it.next()) |key| {
+ const alive = key.*;
+ data.branch_deaths.putAssumeCapacity(alive, {});
}
+ data.live_set.clearRetainingCapacity();
+ },
+ }
- var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
- defer else_table.deinit(gpa);
- try analyzeWithContext(a, &else_table, else_body);
+ return analyzeOperands(a, pass, data, inst, operands);
+}
- var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa);
- defer then_entry_deaths.deinit();
- var else_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa);
- defer else_entry_deaths.deinit();
+fn analyzeInstBr(
+ a: *Analysis,
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
+ inst: Air.Inst.Index,
+) !void {
+ const inst_datas = a.air.instructions.items(.data);
+ const br = inst_datas[inst].br;
+ const gpa = a.gpa;
- {
- var it = else_table.keyIterator();
- while (it.next()) |key| {
- const else_death = key.*;
- if (!then_table.contains(else_death)) {
- try then_entry_deaths.append(else_death);
- }
- }
- }
- // This loop is the same, except it's for the then branch, and it additionally
- // has to put its items back into the table to undo the reset.
- {
- var it = then_table.keyIterator();
- while (it.next()) |key| {
- const then_death = key.*;
- if (!else_table.contains(then_death)) {
- try else_entry_deaths.append(then_death);
- }
- try table.put(gpa, then_death, {});
- }
+ switch (pass) {
+ .loop_analysis => {
+ try data.breaks.put(gpa, br.block_inst, {});
+ },
+
+ .main_analysis => {
+ const block_scope = data.block_scopes.get(br.block_inst).?; // we should always be breaking from an enclosing block
+
+ // We mostly preserve previous branch deaths - anything that should die for our
+ // enclosing branch should die for us too. However, if our break target requires such an
+ // operand to be alive, it's actually not something we want to kill, since its "last
+ // use" (i.e. the point at which it should die) is outside of our scope.
+ var it = block_scope.live_set.keyIterator();
+ while (it.next()) |key| {
+ const alive = key.*;
+ _ = data.branch_deaths.remove(alive);
}
- // Now we have to correctly populate new_set.
- if (new_set) |ns| {
- try ns.ensureUnusedCapacity(gpa, @intCast(u32, then_table.count() + else_table.count()));
- var it = then_table.keyIterator();
- while (it.next()) |key| {
- _ = ns.putAssumeCapacity(key.*, {});
- }
- it = else_table.keyIterator();
- while (it.next()) |key| {
- _ = ns.putAssumeCapacity(key.*, {});
+ log.debug("[{}] %{}: preserved branch deaths are {}", .{ pass, inst, fmtInstSet(&data.branch_deaths) });
+
+ // Anything that's currently alive but our target doesn't need becomes a branch death.
+ it = data.live_set.keyIterator();
+ while (it.next()) |key| {
+ const alive = key.*;
+ if (!block_scope.live_set.contains(alive)) {
+ _ = try data.branch_deaths.put(gpa, alive, {});
+ log.debug("[{}] %{}: added branch death of {}", .{ pass, inst, alive });
}
}
- const then_death_count = @intCast(u32, then_entry_deaths.items.len);
- const else_death_count = @intCast(u32, else_entry_deaths.items.len);
+ const new_live_set = try block_scope.live_set.clone(gpa);
+ data.live_set.deinit(gpa);
+ data.live_set = new_live_set;
+ },
+ }
- try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(Air.CondBr).len +
- then_death_count + else_death_count);
- const extra_index = a.addExtraAssumeCapacity(CondBr{
- .then_death_count = then_death_count,
- .else_death_count = else_death_count,
- });
- a.extra.appendSliceAssumeCapacity(then_entry_deaths.items);
- a.extra.appendSliceAssumeCapacity(else_entry_deaths.items);
- try a.special.put(gpa, inst, extra_index);
+ return analyzeOperands(a, pass, data, inst, .{ br.operand, .none, .none });
+}
- // Continue on with the instruction analysis. The following code will find the condition
- // instruction, and the deaths flag for the CondBr instruction will indicate whether the
- // condition's lifetime ends immediately before entering any branch.
- return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none });
- },
- .switch_br => {
- const pl_op = inst_datas[inst].pl_op;
- const condition = pl_op.operand;
- const switch_br = a.air.extraData(Air.SwitchBr, pl_op.payload);
+fn analyzeInstBlock(
+ a: *Analysis,
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
+ inst: Air.Inst.Index,
+) !void {
+ const inst_datas = a.air.instructions.items(.data);
+ const ty_pl = inst_datas[inst].ty_pl;
+ const extra = a.air.extraData(Air.Block, ty_pl.payload);
+ const body = a.air.extra[extra.end..][0..extra.data.body_len];
- const Table = std.AutoHashMapUnmanaged(Air.Inst.Index, void);
- const case_tables = try gpa.alloc(Table, switch_br.data.cases_len + 1); // +1 for else
- defer gpa.free(case_tables);
+ const gpa = a.gpa;
- std.mem.set(Table, case_tables, .{});
- defer for (case_tables) |*ct| ct.deinit(gpa);
+ // We actually want to do `analyzeOperands` *first*, since our result logically doesn't
+ // exist until the block body ends (and we're iterating backwards)
+ try analyzeOperands(a, pass, data, inst, .{ .none, .none, .none });
- var air_extra_index: usize = switch_br.end;
- for (case_tables[0..switch_br.data.cases_len]) |*case_table| {
- const case = a.air.extraData(Air.SwitchBr.Case, air_extra_index);
- const case_body = a.air.extra[case.end + case.data.items_len ..][0..case.data.body_len];
- air_extra_index = case.end + case.data.items_len + case_body.len;
- try analyzeWithContext(a, case_table, case_body);
+ switch (pass) {
+ .loop_analysis => {
+ try analyzeBody(a, pass, data, body);
+ _ = data.breaks.remove(inst);
+ },
- // Reset the table back to its state from before the case.
- var it = case_table.keyIterator();
- while (it.next()) |key| {
- assert(table.remove(key.*));
- }
+ .main_analysis => {
+ log.debug("[{}] %{}: block live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) });
+ try data.block_scopes.put(gpa, inst, .{
+ .live_set = try data.live_set.clone(gpa),
+ });
+ defer {
+ log.debug("[{}] %{}: popped block scope", .{ pass, inst });
+ var scope = data.block_scopes.fetchRemove(inst).?.value;
+ scope.live_set.deinit(gpa);
}
- { // else
- const else_table = &case_tables[case_tables.len - 1];
- const else_body = a.air.extra[air_extra_index..][0..switch_br.data.else_body_len];
- try analyzeWithContext(a, else_table, else_body);
- // Reset the table back to its state from before the case.
- var it = else_table.keyIterator();
- while (it.next()) |key| {
- assert(table.remove(key.*));
- }
- }
+ log.debug("[{}] %{}: pushed new block scope", .{ pass, inst });
+ try analyzeBody(a, pass, data, body);
- const List = std.ArrayListUnmanaged(Air.Inst.Index);
- const case_deaths = try gpa.alloc(List, case_tables.len); // includes else
- defer gpa.free(case_deaths);
+ // If the block is noreturn, block deaths not only aren't useful, they're impossible to
+ // find: there could be more stuff alive after the block than before it!
+ if (!a.air.getRefType(ty_pl.ty).isNoReturn()) {
+ // The block kills the difference in the live sets
+ const block_scope = data.block_scopes.get(inst).?;
+ const num_deaths = data.live_set.count() - block_scope.live_set.count();
- std.mem.set(List, case_deaths, .{});
- defer for (case_deaths) |*cd| cd.deinit(gpa);
+ try a.extra.ensureUnusedCapacity(gpa, num_deaths + std.meta.fields(Block).len);
+ const extra_index = a.addExtraAssumeCapacity(Block{
+ .death_count = num_deaths,
+ });
- var total_deaths: u32 = 0;
- for (case_tables, 0..) |*ct, i| {
- total_deaths += ct.count();
- var it = ct.keyIterator();
+ var measured_num: u32 = 0;
+ var it = data.live_set.keyIterator();
while (it.next()) |key| {
- const case_death = key.*;
- for (case_tables, 0..) |*ct_inner, j| {
- if (i == j) continue;
- if (!ct_inner.contains(case_death)) {
- // instruction is not referenced in this case
- try case_deaths[j].append(gpa, case_death);
- }
+ const alive = key.*;
+ if (!block_scope.live_set.contains(alive)) {
+ // Dies in block
+ a.extra.appendAssumeCapacity(alive);
+ measured_num += 1;
}
- // undo resetting the table
- try table.put(gpa, case_death, {});
}
+ assert(measured_num == num_deaths); // post-live-set should be a subset of pre-live-set
+ try a.special.put(gpa, inst, extra_index);
+ log.debug("[{}] %{}: block deaths are {}", .{
+ pass,
+ inst,
+ fmtInstList(a.extra.items[extra_index + 1 ..][0..num_deaths]),
+ });
}
+ },
+ }
+}
- // Now we have to correctly populate new_set.
- if (new_set) |ns| {
- try ns.ensureUnusedCapacity(gpa, total_deaths);
- for (case_tables) |*ct| {
- var it = ct.keyIterator();
- while (it.next()) |key| {
- _ = ns.putAssumeCapacity(key.*, {});
- }
- }
+fn analyzeInstLoop(
+ a: *Analysis,
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
+ inst: Air.Inst.Index,
+) !void {
+ const inst_datas = a.air.instructions.items(.data);
+ const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+ const body = a.air.extra[extra.end..][0..extra.data.body_len];
+ const gpa = a.gpa;
+
+ try analyzeOperands(a, pass, data, inst, .{ .none, .none, .none });
+
+ switch (pass) {
+ .loop_analysis => {
+ var old_breaks = data.breaks.move();
+ defer old_breaks.deinit(gpa);
+
+ var old_live = data.live_set.move();
+ defer old_live.deinit(gpa);
+
+ try analyzeBody(a, pass, data, body);
+
+ const num_breaks = data.breaks.count();
+ try a.extra.ensureUnusedCapacity(gpa, 1 + num_breaks);
+
+ const extra_index = @intCast(u32, a.extra.items.len);
+ a.extra.appendAssumeCapacity(num_breaks);
+
+ var it = data.breaks.keyIterator();
+ while (it.next()) |key| {
+ const block_inst = key.*;
+ a.extra.appendAssumeCapacity(block_inst);
}
+ log.debug("[{}] %{}: includes breaks to {}", .{ pass, inst, fmtInstSet(&data.breaks) });
- const else_death_count = @intCast(u32, case_deaths[case_deaths.len - 1].items.len);
- const extra_index = try a.addExtra(SwitchBr{
- .else_death_count = else_death_count,
- });
- for (case_deaths[0 .. case_deaths.len - 1]) |*cd| {
- const case_death_count = @intCast(u32, cd.items.len);
- try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count);
- a.extra.appendAssumeCapacity(case_death_count);
- a.extra.appendSliceAssumeCapacity(cd.items);
+ // Now we put the live operands from the loop body in too
+ const num_live = data.live_set.count();
+ try a.extra.ensureUnusedCapacity(gpa, 1 + num_live);
+
+ a.extra.appendAssumeCapacity(num_live);
+ it = data.live_set.keyIterator();
+ while (it.next()) |key| {
+ const alive = key.*;
+ a.extra.appendAssumeCapacity(alive);
}
- a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items);
+ log.debug("[{}] %{}: maintain liveness of {}", .{ pass, inst, fmtInstSet(&data.live_set) });
+
try a.special.put(gpa, inst, extra_index);
- return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none });
- },
- .wasm_memory_grow => {
- const pl_op = inst_datas[inst].pl_op;
- return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, .none, .none });
+ // Add back operands which were previously alive
+ it = old_live.keyIterator();
+ while (it.next()) |key| {
+ const alive = key.*;
+ try data.live_set.put(gpa, alive, {});
+ }
+
+ // And the same for breaks
+ it = old_breaks.keyIterator();
+ while (it.next()) |key| {
+ const block_inst = key.*;
+ try data.breaks.put(gpa, block_inst, {});
+ }
},
- }
-}
-fn trackOperands(
- a: *Analysis,
- new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
- inst: Air.Inst.Index,
- main_tomb: bool,
- operands: [bpi - 1]Air.Inst.Ref,
-) Allocator.Error!void {
- const table = &a.table;
- const gpa = a.gpa;
+ .main_analysis => {
+ const extra_idx = a.special.fetchRemove(inst).?.value; // remove because this data does not exist after analysis
- var tomb_bits: Bpi = @boolToInt(main_tomb);
- var i = operands.len;
+ const num_breaks = data.old_extra.items[extra_idx];
+ const breaks = data.old_extra.items[extra_idx + 1 ..][0..num_breaks];
- while (i > 0) {
- i -= 1;
- tomb_bits <<= 1;
- const op_int = @enumToInt(operands[i]);
- if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
- const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len);
- const prev = try table.fetchPut(gpa, operand, {});
- if (prev == null) {
- // Death.
- tomb_bits |= 1;
- if (new_set) |ns| try ns.putNoClobber(gpa, operand, {});
- }
- }
- a.storeTombBits(inst, tomb_bits);
-}
+ const num_loop_live = data.old_extra.items[extra_idx + num_breaks + 1];
+ const loop_live = data.old_extra.items[extra_idx + num_breaks + 2 ..][0..num_loop_live];
-const ExtraTombs = struct {
- analysis: *Analysis,
- new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
- inst: Air.Inst.Index,
- main_tomb: bool,
- bit_index: usize = 0,
- tomb_bits: Bpi = 0,
- big_tomb_bits: u32 = 0,
- big_tomb_bits_extra: std.ArrayListUnmanaged(u32) = .{},
-
- fn feed(et: *ExtraTombs, op_ref: Air.Inst.Ref) !void {
- const this_bit_index = et.bit_index;
- et.bit_index += 1;
- const gpa = et.analysis.gpa;
- const op_index = Air.refToIndex(op_ref) orelse return;
- const prev = try et.analysis.table.fetchPut(gpa, op_index, {});
- if (prev == null) {
- // Death.
- if (et.new_set) |ns| try ns.putNoClobber(gpa, op_index, {});
- const available_tomb_bits = bpi - 1;
- if (this_bit_index < available_tomb_bits) {
- et.tomb_bits |= @as(Bpi, 1) << @intCast(OperandInt, this_bit_index);
- } else {
- const big_bit_index = this_bit_index - available_tomb_bits;
- while (big_bit_index >= (et.big_tomb_bits_extra.items.len + 1) * 31) {
- // We need another element in the extra array.
- try et.big_tomb_bits_extra.append(gpa, et.big_tomb_bits);
- et.big_tomb_bits = 0;
- } else {
- const final_bit_index = big_bit_index - et.big_tomb_bits_extra.items.len * 31;
- et.big_tomb_bits |= @as(u32, 1) << @intCast(u5, final_bit_index);
- }
+ // This is necessarily not in the same control flow branch, because loops are noreturn
+ data.live_set.clearRetainingCapacity();
+
+ try data.live_set.ensureUnusedCapacity(gpa, @intCast(u32, loop_live.len));
+ for (loop_live) |alive| {
+ data.live_set.putAssumeCapacity(alive, {});
+ // If the loop requires a branch death operand to be alive, it's not something we
+ // want to kill: its "last use" (i.e. the point at which it should die) is the loop
+ // body itself.
+ _ = data.branch_deaths.remove(alive);
}
- }
- }
- fn finish(et: *ExtraTombs) !void {
- et.tomb_bits |= @as(Bpi, @boolToInt(et.main_tomb)) << (bpi - 1);
- // Signal the terminal big_tomb_bits element.
- et.big_tomb_bits |= @as(u32, 1) << 31;
-
- et.analysis.storeTombBits(et.inst, et.tomb_bits);
- const extra_index = @intCast(u32, et.analysis.extra.items.len);
- try et.analysis.extra.ensureUnusedCapacity(et.analysis.gpa, et.big_tomb_bits_extra.items.len + 1);
- try et.analysis.special.put(et.analysis.gpa, et.inst, extra_index);
- et.analysis.extra.appendSliceAssumeCapacity(et.big_tomb_bits_extra.items);
- et.analysis.extra.appendAssumeCapacity(et.big_tomb_bits);
- }
+ log.debug("[{}] %{}: block live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) });
- fn deinit(et: *ExtraTombs) void {
- et.big_tomb_bits_extra.deinit(et.analysis.gpa);
- }
-};
+ for (breaks) |block_inst| {
+ // We might break to this block, so include every operand that the block needs alive
+ const block_scope = data.block_scopes.get(block_inst).?;
-/// Remove any deaths invalidated by the deaths from an enclosing `loop`. Reshuffling deaths stored
-/// in `extra` causes it to become non-dense, but that's fine - we won't remove too much data.
-/// Making it dense would be a lot more work - it'd require recomputing every index in `special`.
-fn removeDeaths(
- a: *Analysis,
- to_remove: *std.AutoHashMapUnmanaged(Air.Inst.Index, void),
- body: []const Air.Inst.Index,
-) error{OutOfMemory}!void {
- for (body) |inst| {
- try removeInstDeaths(a, to_remove, inst);
+ var it = block_scope.live_set.keyIterator();
+ while (it.next()) |key| {
+ const alive = key.*;
+ try data.live_set.put(gpa, alive, {});
+ }
+ }
+
+ try analyzeBody(a, pass, data, body);
+ },
}
}
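// Illustration (not part of this change): the temporary per-loop extra data
// written by the loop_analysis pass and consumed by main_analysis above is
// laid out as:
//   [ num_breaks, block_inst * num_breaks, num_live, alive_inst * num_live ]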
-fn removeInstDeaths(
+/// Despite its name, this function is used for analysis of not only `cond_br` instructions, but
+/// also `try` and `try_ptr`, which are highly related. The `inst_type` parameter indicates which
+/// type of instruction `inst` points to.
+fn analyzeInstCondBr(
a: *Analysis,
- to_remove: *std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
inst: Air.Inst.Index,
+ comptime inst_type: enum { cond_br, @"try", try_ptr },
) !void {
- const inst_tags = a.air.instructions.items(.tag);
const inst_datas = a.air.instructions.items(.data);
+ const gpa = a.gpa;
- switch (inst_tags[inst]) {
- .add,
- .add_optimized,
- .addwrap,
- .addwrap_optimized,
- .add_sat,
- .sub,
- .sub_optimized,
- .subwrap,
- .subwrap_optimized,
- .sub_sat,
- .mul,
- .mul_optimized,
- .mulwrap,
- .mulwrap_optimized,
- .mul_sat,
- .div_float,
- .div_float_optimized,
- .div_trunc,
- .div_trunc_optimized,
- .div_floor,
- .div_floor_optimized,
- .div_exact,
- .div_exact_optimized,
- .rem,
- .rem_optimized,
- .mod,
- .mod_optimized,
- .bit_and,
- .bit_or,
- .xor,
- .cmp_lt,
- .cmp_lt_optimized,
- .cmp_lte,
- .cmp_lte_optimized,
- .cmp_eq,
- .cmp_eq_optimized,
- .cmp_gte,
- .cmp_gte_optimized,
- .cmp_gt,
- .cmp_gt_optimized,
- .cmp_neq,
- .cmp_neq_optimized,
- .bool_and,
- .bool_or,
- .store,
- .array_elem_val,
- .slice_elem_val,
- .ptr_elem_val,
- .shl,
- .shl_exact,
- .shl_sat,
- .shr,
- .shr_exact,
- .atomic_store_unordered,
- .atomic_store_monotonic,
- .atomic_store_release,
- .atomic_store_seq_cst,
- .set_union_tag,
- .min,
- .max,
- => {
- const o = inst_datas[inst].bin_op;
- removeOperandDeaths(a, to_remove, inst, .{ o.lhs, o.rhs, .none });
- },
+ const extra = switch (inst_type) {
+ .cond_br => a.air.extraData(Air.CondBr, inst_datas[inst].pl_op.payload),
+ .@"try" => a.air.extraData(Air.Try, inst_datas[inst].pl_op.payload),
+ .try_ptr => a.air.extraData(Air.TryPtr, inst_datas[inst].ty_pl.payload),
+ };
- .vector_store_elem => {
- const o = inst_datas[inst].vector_store_elem;
- const extra = a.air.extraData(Air.Bin, o.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ o.vector_ptr, extra.lhs, extra.rhs });
- },
+ const condition = switch (inst_type) {
+ .cond_br, .@"try" => inst_datas[inst].pl_op.operand,
+ .try_ptr => extra.data.ptr,
+ };
- .arg,
- .alloc,
- .ret_ptr,
- .constant,
- .const_ty,
- .trap,
- .breakpoint,
- .dbg_stmt,
- .dbg_inline_begin,
- .dbg_inline_end,
- .dbg_block_begin,
- .dbg_block_end,
- .unreach,
- .fence,
- .ret_addr,
- .frame_addr,
- .wasm_memory_size,
- .err_return_trace,
- .save_err_return_trace_index,
- .c_va_start,
- .work_item_id,
- .work_group_size,
- .work_group_id,
- => {},
+ const then_body = switch (inst_type) {
+ .cond_br => a.air.extra[extra.end..][0..extra.data.then_body_len],
+ else => {}, // we won't use this
+ };
- .not,
- .bitcast,
- .load,
- .fpext,
- .fptrunc,
- .intcast,
- .trunc,
- .optional_payload,
- .optional_payload_ptr,
- .optional_payload_ptr_set,
- .errunion_payload_ptr_set,
- .wrap_optional,
- .unwrap_errunion_payload,
- .unwrap_errunion_err,
- .unwrap_errunion_payload_ptr,
- .unwrap_errunion_err_ptr,
- .wrap_errunion_payload,
- .wrap_errunion_err,
- .slice_ptr,
- .slice_len,
- .ptr_slice_len_ptr,
- .ptr_slice_ptr_ptr,
- .struct_field_ptr_index_0,
- .struct_field_ptr_index_1,
- .struct_field_ptr_index_2,
- .struct_field_ptr_index_3,
- .array_to_slice,
- .float_to_int,
- .float_to_int_optimized,
- .int_to_float,
- .get_union_tag,
- .clz,
- .ctz,
- .popcount,
- .byte_swap,
- .bit_reverse,
- .splat,
- .error_set_has_value,
- .addrspace_cast,
- .c_va_arg,
- .c_va_copy,
- => {
- const o = inst_datas[inst].ty_op;
- removeOperandDeaths(a, to_remove, inst, .{ o.operand, .none, .none });
- },
+ const else_body = switch (inst_type) {
+ .cond_br => a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len],
+ .@"try", .try_ptr => a.air.extra[extra.end..][0..extra.data.body_len],
+ };
- .is_null,
- .is_non_null,
- .is_null_ptr,
- .is_non_null_ptr,
- .is_err,
- .is_non_err,
- .is_err_ptr,
- .is_non_err_ptr,
- .ptrtoint,
- .bool_to_int,
- .ret,
- .ret_load,
- .is_named_enum_value,
- .tag_name,
- .error_name,
- .sqrt,
- .sin,
- .cos,
- .tan,
- .exp,
- .exp2,
- .log,
- .log2,
- .log10,
- .fabs,
- .floor,
- .ceil,
- .round,
- .trunc_float,
- .neg,
- .neg_optimized,
- .cmp_lt_errors_len,
- .set_err_return_trace,
- .c_va_end,
- => {
- const operand = inst_datas[inst].un_op;
- removeOperandDeaths(a, to_remove, inst, .{ operand, .none, .none });
- },
+ switch (pass) {
+ .loop_analysis => {
+ switch (inst_type) {
+ .cond_br => try analyzeBody(a, pass, data, then_body),
+ .@"try", .try_ptr => {},
+ }
+ try analyzeBody(a, pass, data, else_body);
+ },
+
+ .main_analysis => {
+ var then_info: ControlBranchInfo = switch (inst_type) {
+ .cond_br => try analyzeBodyResetBranch(a, pass, data, then_body),
+ .@"try", .try_ptr => blk: {
+ var branch_deaths = try data.branch_deaths.clone(gpa);
+ errdefer branch_deaths.deinit(gpa);
+ var live_set = try data.live_set.clone(gpa);
+ errdefer live_set.deinit(gpa);
+ break :blk .{
+ .branch_deaths = branch_deaths,
+ .live_set = live_set,
+ };
+ },
+ };
+ defer then_info.branch_deaths.deinit(gpa);
+ defer then_info.live_set.deinit(gpa);
+
+ // If this is a `try`, the "then body" (rest of the branch) might have referenced our
+ // result. If so, we want to avoid this value being considered live while analyzing the
+ // else branch.
+ switch (inst_type) {
+ .cond_br => {},
+ .@"try", .try_ptr => _ = data.live_set.remove(inst),
+ }
- .add_with_overflow,
- .sub_with_overflow,
- .mul_with_overflow,
- .shl_with_overflow,
- .ptr_add,
- .ptr_sub,
- .ptr_elem_ptr,
- .slice_elem_ptr,
- .slice,
- => {
- const ty_pl = inst_datas[inst].ty_pl;
- const extra = a.air.extraData(Air.Bin, ty_pl.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ extra.lhs, extra.rhs, .none });
- },
+ try analyzeBody(a, pass, data, else_body);
+ var else_info: ControlBranchInfo = .{
+ .branch_deaths = data.branch_deaths.move(),
+ .live_set = data.live_set.move(),
+ };
+ defer else_info.branch_deaths.deinit(gpa);
+ defer else_info.live_set.deinit(gpa);
- .dbg_var_ptr,
- .dbg_var_val,
- => {
- const operand = inst_datas[inst].pl_op.operand;
- removeOperandDeaths(a, to_remove, inst, .{ operand, .none, .none });
- },
+ // Any queued deaths shared between both branches can be queued for us instead
+ {
+ var it = then_info.branch_deaths.keyIterator();
+ while (it.next()) |key| {
+ const death = key.*;
+ if (else_info.branch_deaths.remove(death)) {
+ // We'll remove it from then_deaths below
+ try data.branch_deaths.put(gpa, death, {});
+ }
+ }
+ log.debug("[{}] %{}: bubbled deaths {}", .{ pass, inst, fmtInstSet(&data.branch_deaths) });
+ it = data.branch_deaths.keyIterator();
+ while (it.next()) |key| {
+ const death = key.*;
+ assert(then_info.branch_deaths.remove(death));
+ }
+ }
- .prefetch => {
- const prefetch = inst_datas[inst].prefetch;
- removeOperandDeaths(a, to_remove, inst, .{ prefetch.ptr, .none, .none });
- },
+ log.debug("[{}] %{}: remaining 'then' branch deaths are {}", .{ pass, inst, fmtInstSet(&then_info.branch_deaths) });
+ log.debug("[{}] %{}: remaining 'else' branch deaths are {}", .{ pass, inst, fmtInstSet(&else_info.branch_deaths) });
- .call, .call_always_tail, .call_never_tail, .call_never_inline => {
- const inst_data = inst_datas[inst].pl_op;
- const callee = inst_data.operand;
- const extra = a.air.extraData(Air.Call, inst_data.payload);
- const args = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]);
+ // Deaths that occur in one branch but not another need to be made to occur at the start
+ // of the other branch.
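+ // For instance, if %5 is referenced in the "then" body but never in the "else" body, it
+ // is live entering "then" but not "else", so %5 becomes an "else" mirrored death and is
+ // killed at the start of that branch.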
- var death_remover = BigTombDeathRemover.init(a, to_remove, inst);
- death_remover.feed(callee);
- for (args) |operand| {
- death_remover.feed(operand);
- }
- death_remover.finish();
- },
- .select => {
- const pl_op = inst_datas[inst].pl_op;
- const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ pl_op.operand, extra.lhs, extra.rhs });
- },
- .shuffle => {
- const extra = a.air.extraData(Air.Shuffle, inst_datas[inst].ty_pl.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ extra.a, extra.b, .none });
- },
- .reduce, .reduce_optimized => {
- const reduce = inst_datas[inst].reduce;
- removeOperandDeaths(a, to_remove, inst, .{ reduce.operand, .none, .none });
- },
- .cmp_vector, .cmp_vector_optimized => {
- const extra = a.air.extraData(Air.VectorCmp, inst_datas[inst].ty_pl.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ extra.lhs, extra.rhs, .none });
- },
- .aggregate_init => {
- const ty_pl = inst_datas[inst].ty_pl;
- const aggregate_ty = a.air.getRefType(ty_pl.ty);
- const len = @intCast(usize, aggregate_ty.arrayLen());
- const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
+ var then_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .{};
+ defer then_mirrored_deaths.deinit(gpa);
- var death_remover = BigTombDeathRemover.init(a, to_remove, inst);
- for (elements) |elem| {
- death_remover.feed(elem);
- }
- death_remover.finish();
- },
- .union_init => {
- const extra = a.air.extraData(Air.UnionInit, inst_datas[inst].ty_pl.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ extra.init, .none, .none });
- },
- .struct_field_ptr, .struct_field_val => {
- const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ extra.struct_operand, .none, .none });
- },
- .field_parent_ptr => {
- const extra = a.air.extraData(Air.FieldParentPtr, inst_datas[inst].ty_pl.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ extra.field_ptr, .none, .none });
- },
- .cmpxchg_strong, .cmpxchg_weak => {
- const extra = a.air.extraData(Air.Cmpxchg, inst_datas[inst].ty_pl.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ extra.ptr, extra.expected_value, extra.new_value });
- },
- .mul_add => {
- const pl_op = inst_datas[inst].pl_op;
- const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ extra.lhs, extra.rhs, pl_op.operand });
- },
- .atomic_load => {
- const ptr = inst_datas[inst].atomic_load.ptr;
- removeOperandDeaths(a, to_remove, inst, .{ ptr, .none, .none });
- },
- .atomic_rmw => {
- const pl_op = inst_datas[inst].pl_op;
- const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ pl_op.operand, extra.operand, .none });
- },
- .memset,
- .memcpy,
- => {
- const pl_op = inst_datas[inst].pl_op;
- const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
- removeOperandDeaths(a, to_remove, inst, .{ pl_op.operand, extra.lhs, extra.rhs });
- },
+ var else_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .{};
+ defer else_mirrored_deaths.deinit(gpa);
- .br => {
- const br = inst_datas[inst].br;
- removeOperandDeaths(a, to_remove, inst, .{ br.operand, .none, .none });
- },
- .assembly => {
- const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload);
- var extra_i: usize = extra.end;
- const outputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.outputs_len]);
- extra_i += outputs.len;
- const inputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.inputs_len]);
- extra_i += inputs.len;
+ // Note: this invalidates `else_info.live_set`, but expands `then_info.live_set` to
+ // be their union
+ {
+ var it = then_info.live_set.keyIterator();
+ while (it.next()) |key| {
+ const death = key.*;
+ if (else_info.live_set.remove(death)) continue; // removing makes the loop below faster
+ if (else_info.branch_deaths.contains(death)) continue;
+
+ // If this is a `try`, the "then body" (rest of the branch) might have
+ // referenced our result. We want to avoid killing this value in the else branch
+ // if that's the case, since it only exists in the (fake) then branch.
+ switch (inst_type) {
+ .cond_br => {},
+ .@"try", .try_ptr => if (death == inst) continue,
+ }
- var death_remover = BigTombDeathRemover.init(a, to_remove, inst);
- for (outputs) |output| {
- if (output != .none) {
- death_remover.feed(output);
+ try else_mirrored_deaths.append(gpa, death);
+ }
+ // Since we removed the shared instructions above, `else_info.live_set` now contains
+ // only operands which are alive *only* in the else branch
+ it = else_info.live_set.keyIterator();
+ while (it.next()) |key| {
+ const death = key.*;
+ if (!then_info.branch_deaths.contains(death)) {
+ try then_mirrored_deaths.append(gpa, death);
+ }
+ // Make `then_info.live_set` contain the full live set (i.e. union of both)
+ try then_info.live_set.put(gpa, death, {});
}
}
- for (inputs) |input| {
- death_remover.feed(input);
- }
- death_remover.finish();
- },
- .block => {
- const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
- const body = a.air.extra[extra.end..][0..extra.data.body_len];
- try removeDeaths(a, to_remove, body);
- },
- .loop => {
- const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
- const body = a.air.extra[extra.end..][0..extra.data.body_len];
- const liveness_extra_idx = a.special.get(inst) orelse {
- try removeDeaths(a, to_remove, body);
- return;
- };
+ log.debug("[{}] %{}: 'then' branch mirrored deaths are {}", .{ pass, inst, fmtInstList(then_mirrored_deaths.items) });
+ log.debug("[{}] %{}: 'else' branch mirrored deaths are {}", .{ pass, inst, fmtInstList(else_mirrored_deaths.items) });
+
+ data.live_set.deinit(gpa);
+ data.live_set = then_info.live_set.move();
- const death_count = a.extra.items[liveness_extra_idx];
- var deaths = a.extra.items[liveness_extra_idx + 1 ..][0..death_count];
+ log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) });
- // Remove any deaths in `to_remove` from this loop's deaths
- deaths.len = removeExtraDeaths(to_remove, deaths);
- a.extra.items[liveness_extra_idx] = @intCast(u32, deaths.len);
+ // Write the branch deaths to `extra`
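+ // The layout is: { then_death_count, else_death_count, then deaths (mirrored deaths
+ // first), else deaths (mirrored deaths first) }.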
+ const then_death_count = then_info.branch_deaths.count() + @intCast(u32, then_mirrored_deaths.items.len);
+ const else_death_count = else_info.branch_deaths.count() + @intCast(u32, else_mirrored_deaths.items.len);
- // Temporarily add any deaths of ours to `to_remove`
- try to_remove.ensureUnusedCapacity(a.gpa, @intCast(u32, deaths.len));
- for (deaths) |d| {
- to_remove.putAssumeCapacity(d, {});
+ try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(CondBr).len + then_death_count + else_death_count);
+ const extra_index = a.addExtraAssumeCapacity(CondBr{
+ .then_death_count = then_death_count,
+ .else_death_count = else_death_count,
+ });
+ a.extra.appendSliceAssumeCapacity(then_mirrored_deaths.items);
+ {
+ var it = then_info.branch_deaths.keyIterator();
+ while (it.next()) |key| a.extra.appendAssumeCapacity(key.*);
}
- try removeDeaths(a, to_remove, body);
- for (deaths) |d| {
- _ = to_remove.remove(d);
+ a.extra.appendSliceAssumeCapacity(else_mirrored_deaths.items);
+ {
+ var it = else_info.branch_deaths.keyIterator();
+ while (it.next()) |key| a.extra.appendAssumeCapacity(key.*);
}
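+ // Backends read these deaths back through `Liveness.getCondBr`, which looks up this
+ // instruction in the `special` map.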
+ try a.special.put(gpa, inst, extra_index);
},
- .@"try" => {
- const pl_op = inst_datas[inst].pl_op;
- const extra = a.air.extraData(Air.Try, pl_op.payload);
- const body = a.air.extra[extra.end..][0..extra.data.body_len];
- try removeDeaths(a, to_remove, body);
- removeOperandDeaths(a, to_remove, inst, .{ pl_op.operand, .none, .none });
- },
- .try_ptr => {
- const extra = a.air.extraData(Air.TryPtr, inst_datas[inst].ty_pl.payload);
- const body = a.air.extra[extra.end..][0..extra.data.body_len];
- try removeDeaths(a, to_remove, body);
- removeOperandDeaths(a, to_remove, inst, .{ extra.data.ptr, .none, .none });
- },
- .cond_br => {
- const inst_data = inst_datas[inst].pl_op;
- const condition = inst_data.operand;
- const extra = a.air.extraData(Air.CondBr, inst_data.payload);
- const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len];
- const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
-
- if (a.special.get(inst)) |liveness_extra_idx| {
- const then_death_count = a.extra.items[liveness_extra_idx + 0];
- const else_death_count = a.extra.items[liveness_extra_idx + 1];
- var then_deaths = a.extra.items[liveness_extra_idx + 2 ..][0..then_death_count];
- var else_deaths = a.extra.items[liveness_extra_idx + 2 + then_death_count ..][0..else_death_count];
-
- const new_then_death_count = removeExtraDeaths(to_remove, then_deaths);
- const new_else_death_count = removeExtraDeaths(to_remove, else_deaths);
-
- a.extra.items[liveness_extra_idx + 0] = new_then_death_count;
- a.extra.items[liveness_extra_idx + 1] = new_else_death_count;
-
- if (new_then_death_count < then_death_count) {
- // `else` deaths need to be moved earlier in `extra`
- const src = a.extra.items[liveness_extra_idx + 2 + then_death_count ..];
- const dest = a.extra.items[liveness_extra_idx + 2 + new_then_death_count ..];
- std.mem.copy(u32, dest, src[0..new_else_death_count]);
- }
- }
+ }
- try removeDeaths(a, to_remove, then_body);
- try removeDeaths(a, to_remove, else_body);
+ try analyzeOperands(a, pass, data, inst, .{ condition, .none, .none });
+}
- removeOperandDeaths(a, to_remove, inst, .{ condition, .none, .none });
+fn analyzeInstSwitchBr(
+ a: *Analysis,
+ comptime pass: LivenessPass,
+ data: *LivenessPassData(pass),
+ inst: Air.Inst.Index,
+) !void {
+ const inst_datas = a.air.instructions.items(.data);
+ const pl_op = inst_datas[inst].pl_op;
+ const condition = pl_op.operand;
+ const switch_br = a.air.extraData(Air.SwitchBr, pl_op.payload);
+ const gpa = a.gpa;
+ const ncases = switch_br.data.cases_len;
+
+ switch (pass) {
+ .loop_analysis => {
+ var air_extra_index: usize = switch_br.end;
+ for (0..ncases) |_| {
+ const case = a.air.extraData(Air.SwitchBr.Case, air_extra_index);
+ const case_body = a.air.extra[case.end + case.data.items_len ..][0..case.data.body_len];
+ air_extra_index = case.end + case.data.items_len + case_body.len;
+ try analyzeBody(a, pass, data, case_body);
+ }
+ { // else
+ const else_body = a.air.extra[air_extra_index..][0..switch_br.data.else_body_len];
+ try analyzeBody(a, pass, data, else_body);
+ }
},
- .switch_br => {
- const pl_op = inst_datas[inst].pl_op;
- const condition = pl_op.operand;
- const switch_br = a.air.extraData(Air.SwitchBr, pl_op.payload);
+
+ .main_analysis => {
+ // This is, all in all, just a messier version of the `cond_br` logic. If you're trying
+ // to understand it, I encourage looking at `analyzeInstCondBr` first.
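+ // In short: each case body (and the else body) is analyzed independently, queued deaths
+ // shared by every branch bubble up to the parent, and anything alive in some branches
+ // but not others is mirrored to die at the start of the branches lacking it.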
+
+ const DeathSet = std.AutoHashMapUnmanaged(Air.Inst.Index, void);
+ const DeathList = std.ArrayListUnmanaged(Air.Inst.Index);
+
+ var case_infos = try gpa.alloc(ControlBranchInfo, ncases + 1); // +1 for else
+ defer gpa.free(case_infos);
+
+ std.mem.set(ControlBranchInfo, case_infos, .{});
+ defer for (case_infos) |*info| {
+ info.branch_deaths.deinit(gpa);
+ info.live_set.deinit(gpa);
+ };
var air_extra_index: usize = switch_br.end;
- for (0..switch_br.data.cases_len) |_| {
+ for (case_infos[0..ncases]) |*info| {
const case = a.air.extraData(Air.SwitchBr.Case, air_extra_index);
const case_body = a.air.extra[case.end + case.data.items_len ..][0..case.data.body_len];
air_extra_index = case.end + case.data.items_len + case_body.len;
- try removeDeaths(a, to_remove, case_body);
+ info.* = try analyzeBodyResetBranch(a, pass, data, case_body);
}
{ // else
const else_body = a.air.extra[air_extra_index..][0..switch_br.data.else_body_len];
- try removeDeaths(a, to_remove, else_body);
+ try analyzeBody(a, pass, data, else_body);
+ case_infos[ncases] = .{
+ .branch_deaths = data.branch_deaths.move(),
+ .live_set = data.live_set.move(),
+ };
}
- if (a.special.get(inst)) |liveness_extra_idx| {
- const else_death_count = a.extra.items[liveness_extra_idx];
- var read_idx = liveness_extra_idx + 1;
- var write_idx = read_idx; // write_idx <= read_idx always
- for (0..switch_br.data.cases_len) |_| {
- const case_death_count = a.extra.items[read_idx];
- const case_deaths = a.extra.items[read_idx + 1 ..][0..case_death_count];
- const new_death_count = removeExtraDeaths(to_remove, case_deaths);
- a.extra.items[write_idx] = new_death_count;
- if (write_idx < read_idx) {
- std.mem.copy(u32, a.extra.items[write_idx + 1 ..], a.extra.items[read_idx + 1 ..][0..new_death_count]);
+ // Queued deaths common to all cases can be bubbled up
+ {
+ // We can't remove from the set we're iterating over, so we collect the shared deaths
+ // here first and remove them afterwards
+ var shared_deaths: DeathSet = .{};
+ defer shared_deaths.deinit(gpa);
+
+ var it = case_infos[0].branch_deaths.keyIterator();
+ while (it.next()) |key| {
+ const death = key.*;
+ for (case_infos[1..]) |*info| {
+ if (!info.branch_deaths.contains(death)) break;
+ } else try shared_deaths.put(gpa, death, {});
+ }
+
+ log.debug("[{}] %{}: bubbled deaths {}", .{ pass, inst, fmtInstSet(&shared_deaths) });
+
+ try data.branch_deaths.ensureUnusedCapacity(gpa, shared_deaths.count());
+ it = shared_deaths.keyIterator();
+ while (it.next()) |key| {
+ const death = key.*;
+ data.branch_deaths.putAssumeCapacity(death, {});
+ for (case_infos) |*info| {
+ _ = info.branch_deaths.remove(death);
}
- read_idx += 1 + case_death_count;
- write_idx += 1 + new_death_count;
}
- const else_deaths = a.extra.items[read_idx..][0..else_death_count];
- const new_else_death_count = removeExtraDeaths(to_remove, else_deaths);
- a.extra.items[liveness_extra_idx] = new_else_death_count;
- if (write_idx < read_idx) {
- std.mem.copy(u32, a.extra.items[write_idx..], a.extra.items[read_idx..][0..new_else_death_count]);
+
+ for (case_infos, 0..) |*info, i| {
+ log.debug("[{}] %{}: case {} remaining branch deaths are {}", .{ pass, inst, i, fmtInstSet(&info.branch_deaths) });
}
}
- removeOperandDeaths(a, to_remove, inst, .{ condition, .none, .none });
- },
- .wasm_memory_grow => {
- const pl_op = inst_datas[inst].pl_op;
- removeOperandDeaths(a, to_remove, inst, .{ pl_op.operand, .none, .none });
+ const mirrored_deaths = try gpa.alloc(DeathList, ncases + 1);
+ defer gpa.free(mirrored_deaths);
+
+ std.mem.set(DeathList, mirrored_deaths, .{});
+ defer for (mirrored_deaths) |*md| md.deinit(gpa);
+
+ {
+ var all_alive: DeathSet = .{};
+ defer all_alive.deinit(gpa);
+
+ for (case_infos) |*info| {
+ try all_alive.ensureUnusedCapacity(gpa, info.live_set.count());
+ var it = info.live_set.keyIterator();
+ while (it.next()) |key| {
+ const alive = key.*;
+ all_alive.putAssumeCapacity(alive, {});
+ }
+ }
+
+ for (mirrored_deaths, case_infos) |*mirrored, *info| {
+ var it = all_alive.keyIterator();
+ while (it.next()) |key| {
+ const alive = key.*;
+ if (!info.live_set.contains(alive) and !info.branch_deaths.contains(alive)) {
+ // Should die at the start of this branch
+ try mirrored.append(gpa, alive);
+ }
+ }
+ }
+
+ for (mirrored_deaths, 0..) |mirrored, i| {
+ log.debug("[{}] %{}: case {} mirrored deaths are {}", .{ pass, inst, i, fmtInstList(mirrored.items) });
+ }
+
+ data.live_set.deinit(gpa);
+ data.live_set = all_alive.move();
+
+ log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) });
+ }
+
+ const else_death_count = case_infos[ncases].branch_deaths.count() + @intCast(u32, mirrored_deaths[ncases].items.len);
+
+ const extra_index = try a.addExtra(SwitchBr{
+ .else_death_count = else_death_count,
+ });
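+ // `extra` layout: { else_death_count, then for each case its death count followed by its
+ // deaths (mirrored first), and finally the else deaths (mirrored first) }.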
+ for (mirrored_deaths[0..ncases], case_infos[0..ncases]) |mirrored, info| {
+ const num = info.branch_deaths.count() + @intCast(u32, mirrored.items.len);
+ try a.extra.ensureUnusedCapacity(gpa, num + 1);
+ a.extra.appendAssumeCapacity(num);
+ a.extra.appendSliceAssumeCapacity(mirrored.items);
+ {
+ var it = info.branch_deaths.keyIterator();
+ while (it.next()) |key| a.extra.appendAssumeCapacity(key.*);
+ }
+ }
+ try a.extra.ensureUnusedCapacity(gpa, else_death_count);
+ a.extra.appendSliceAssumeCapacity(mirrored_deaths[ncases].items);
+ {
+ var it = case_infos[ncases].branch_deaths.keyIterator();
+ while (it.next()) |key| a.extra.appendAssumeCapacity(key.*);
+ }
+ try a.special.put(gpa, inst, extra_index);
},
}
+
+ try analyzeOperands(a, pass, data, inst, .{ condition, .none, .none });
}
-fn removeOperandDeaths(
- a: *Analysis,
- to_remove: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void),
- inst: Air.Inst.Index,
- operands: [bpi - 1]Air.Inst.Ref,
-) void {
- const usize_index = (inst * bpi) / @bitSizeOf(usize);
+fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
+ return struct {
+ a: *Analysis,
+ data: *LivenessPassData(pass),
+ inst: Air.Inst.Index,
+
+ operands_remaining: u32,
+ small: [bpi - 1]Air.Inst.Ref = .{.none} ** (bpi - 1),
+ extra_tombs: []u32,
+
+ // Only used in `LivenessPass.main_analysis`
+ will_die_immediately: bool,
+
+ const Self = @This();
+
+ fn init(
+ a: *Analysis,
+ data: *LivenessPassData(pass),
+ inst: Air.Inst.Index,
+ total_operands: usize,
+ ) !Self {
+ const extra_operands = @intCast(u32, total_operands) -| (bpi - 1);
+ const max_extra_tombs = (extra_operands + 30) / 31;
+
+ const extra_tombs: []u32 = switch (pass) {
+ .loop_analysis => &.{},
+ .main_analysis => try a.gpa.alloc(u32, max_extra_tombs),
+ };
+ errdefer a.gpa.free(extra_tombs);
- const cur_tomb = @truncate(Bpi, a.tomb_bits[usize_index] >>
- @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi));
+ std.mem.set(u32, extra_tombs, 0);
- var toggle_bits: Bpi = 0;
+ const will_die_immediately: bool = switch (pass) {
+ .loop_analysis => false, // track everything, since we don't have full liveness information yet
+ .main_analysis => data.branch_deaths.contains(inst) and !data.live_set.contains(inst),
+ };
- for (operands, 0..) |op_ref, i| {
- const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
- const op_int = @enumToInt(op_ref);
- if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
- const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len);
- if ((cur_tomb & mask) != 0 and to_remove.contains(operand)) {
- log.debug("remove death of %{} in %{}", .{ operand, inst });
- toggle_bits ^= mask;
+ return .{
+ .a = a,
+ .data = data,
+ .inst = inst,
+ .operands_remaining = @intCast(u32, total_operands),
+ .extra_tombs = extra_tombs,
+ .will_die_immediately = will_die_immediately,
+ };
}
- }
- a.tomb_bits[usize_index] ^= @as(usize, toggle_bits) <<
- @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi);
-}
+ /// Must be called with operands in reverse order.
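+ /// (After each call, `operands_remaining` is the index of the operand just fed, which is
+ /// how `small` slots and extra tomb bits are addressed.)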
+ fn feed(big: *Self, op_ref: Air.Inst.Ref) !void {
+ // Note that after this, `operands_remaining` becomes the index of the current operand
+ big.operands_remaining -= 1;
-fn removeExtraDeaths(
- to_remove: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void),
- deaths: []Air.Inst.Index,
-) u32 {
- var new_len = @intCast(u32, deaths.len);
- var i: usize = 0;
- while (i < new_len) {
- if (to_remove.contains(deaths[i])) {
- log.debug("remove extra death of %{}", .{deaths[i]});
- deaths[i] = deaths[new_len - 1];
- new_len -= 1;
- } else {
- i += 1;
- }
- }
- return new_len;
-}
+ if (big.operands_remaining < bpi - 1) {
+ big.small[big.operands_remaining] = op_ref;
+ return;
+ }
-const BigTombDeathRemover = struct {
- a: *Analysis,
- to_remove: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void),
- inst: Air.Inst.Index,
+ const operand = Air.refToIndex(op_ref) orelse return;
- operands: [bpi - 1]Air.Inst.Ref = .{.none} ** (bpi - 1),
- next_oper: OperandInt = 0,
+ // Don't compute any liveness for constants
+ const inst_tags = big.a.air.instructions.items(.tag);
+ switch (inst_tags[operand]) {
+ .constant, .const_ty => return,
+ else => {},
+ }
- bit_index: u32 = 0,
- // Initialized once we finish the small tomb operands: see `feed`
- extra_start: u32 = undefined,
- extra_offset: u32 = 0,
+ // If our result is unused and the instruction doesn't need to be lowered, backends will
+ // skip the lowering of this instruction, so we don't want to record uses of operands.
+ // That way, we can mark as many instructions as possible unused.
+ if (big.will_die_immediately and !big.a.air.mustLower(big.inst)) return;
- fn init(a: *Analysis, to_remove: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index) BigTombDeathRemover {
- return .{
- .a = a,
- .to_remove = to_remove,
- .inst = inst,
- };
- }
+ const extra_byte = (big.operands_remaining - (bpi - 1)) / 31;
+ const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31);
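+ // Each extra tomb word holds 31 operand bits; bit 31 is reserved for the terminal marker
+ // written in `finish`.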
+
+ const gpa = big.a.gpa;
- fn feed(dr: *BigTombDeathRemover, operand: Air.Inst.Ref) void {
- if (dr.next_oper < bpi - 1) {
- dr.operands[dr.next_oper] = operand;
- dr.next_oper += 1;
- if (dr.next_oper == bpi - 1) {
- removeOperandDeaths(dr.a, dr.to_remove, dr.inst, dr.operands);
- if (dr.a.special.get(dr.inst)) |idx| dr.extra_start = idx;
+ switch (pass) {
+ .loop_analysis => {
+ _ = try big.data.live_set.put(gpa, operand, {});
+ },
+
+ .main_analysis => {
+ if ((try big.data.live_set.fetchPut(gpa, operand, {})) == null) {
+ log.debug("[{}] %{}: added %{} to live set (operand dies here)", .{ pass, big.inst, operand });
+ big.extra_tombs[extra_byte] |= @as(u32, 1) << extra_bit;
+ if (big.data.branch_deaths.remove(operand)) {
+ log.debug("[{}] %{}: resolved branch death of %{} to this usage", .{ pass, big.inst, operand });
+ }
+ }
+ },
}
- return;
}
- defer dr.bit_index += 1;
+ fn finish(big: *Self) !void {
+ const gpa = big.a.gpa;
+
+ std.debug.assert(big.operands_remaining == 0);
+
+ switch (pass) {
+ .loop_analysis => {},
+
+ .main_analysis => {
+ // Note that the MSB is set on the final tomb to indicate the terminal element. This
+ // allows for an optimisation where we only store as many extra tombs as are needed to
+ // represent the dying operands: trailing tombs in which no operand dies can be dropped,
+ // so let's figure out how many extra tombs we really need. Note that we always keep at
+ // least one.
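+ // An illustrative case: 40 operands beyond `small` occupy two tombs (31 + 9 bits); if no
+ // operand covered by the second tomb dies, only the first tomb is stored and the
+ // terminal bit lands there.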
+ var num: usize = big.extra_tombs.len;
+ while (num > 1) {
+ if (@truncate(u31, big.extra_tombs[num - 1]) != 0) {
+ // Some operand dies here
+ break;
+ }
+ num -= 1;
+ }
+ // Mark final tomb
+ big.extra_tombs[num - 1] |= @as(u32, 1) << 31;
+
+ const extra_tombs = big.extra_tombs[0..num];
- const op_int = @enumToInt(operand);
- if (op_int < Air.Inst.Ref.typed_value_map.len) return;
+ const extra_index = @intCast(u32, big.a.extra.items.len);
+ try big.a.extra.appendSlice(gpa, extra_tombs);
+ try big.a.special.put(gpa, big.inst, extra_index);
+ },
+ }
- const op_inst: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len);
+ try analyzeOperands(big.a, pass, big.data, big.inst, big.small);
+ }
- while (dr.bit_index - dr.extra_offset * 31 >= 31) {
- dr.extra_offset += 1;
+ fn deinit(big: *Self) void {
+ big.a.gpa.free(big.extra_tombs);
}
- const dies = @truncate(u1, dr.a.extra.items[dr.extra_start + dr.extra_offset] >>
- @intCast(u5, dr.bit_index - dr.extra_offset * 31)) != 0;
+ };
+}
+
+fn fmtInstSet(set: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void)) FmtInstSet {
+ return .{ .set = set };
+}
+
+const FmtInstSet = struct {
+ set: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void),
- if (dies and dr.to_remove.contains(op_inst)) {
- log.debug("remove big death of %{}", .{op_inst});
- dr.a.extra.items[dr.extra_start + dr.extra_offset] ^=
- (@as(u32, 1) << @intCast(u5, dr.bit_index - dr.extra_offset * 31));
+ pub fn format(val: FmtInstSet, comptime _: []const u8, _: std.fmt.FormatOptions, w: anytype) !void {
+ if (val.set.count() == 0) {
+ try w.writeAll("[no instructions]");
+ return;
+ }
+ var it = val.set.keyIterator();
+ try w.print("%{}", .{it.next().?.*});
+ while (it.next()) |key| {
+ try w.print(" %{}", .{key.*});
}
}
+};
- fn finish(dr: *BigTombDeathRemover) void {
- if (dr.next_oper < bpi) {
- removeOperandDeaths(dr.a, dr.to_remove, dr.inst, dr.operands);
+fn fmtInstList(list: []const Air.Inst.Index) FmtInstList {
+ return .{ .list = list };
+}
+
+const FmtInstList = struct {
+ list: []const Air.Inst.Index,
+
+ pub fn format(val: FmtInstList, comptime _: []const u8, _: std.fmt.FormatOptions, w: anytype) !void {
+ if (val.list.len == 0) {
+ try w.writeAll("[no instructions]");
+ return;
+ }
+ try w.print("%{}", .{val.list[0]});
+ for (val.list[1..]) |inst| {
+ try w.print(" %{}", .{inst});
}
}
};
diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig
new file mode 100644
index 0000000000..6c1e72392d
--- /dev/null
+++ b/src/Liveness/Verify.zig
@@ -0,0 +1,610 @@
+//! Verifies that liveness information is valid.
+
+gpa: std.mem.Allocator,
+air: Air,
+liveness: Liveness,
+live: LiveMap = .{},
+blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{},
+
+pub const Error = error{ LivenessInvalid, OutOfMemory };
+
+pub fn deinit(self: *Verify) void {
+ self.live.deinit(self.gpa);
+ var block_it = self.blocks.valueIterator();
+ while (block_it.next()) |block| block.deinit(self.gpa);
+ self.blocks.deinit(self.gpa);
+ self.* = undefined;
+}
+
+pub fn verify(self: *Verify) Error!void {
+ self.live.clearRetainingCapacity();
+ self.blocks.clearRetainingCapacity();
+ try self.verifyBody(self.air.getMainBody());
+ // We don't care about `self.live` now, because the main body is noreturn - everything being dead was checked on `ret`, `unreach`, etc.
+ assert(self.blocks.count() == 0);
+}
+
+const LiveMap = std.AutoHashMapUnmanaged(Air.Inst.Index, void);
+
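+ // The verifier re-walks each body forward, tracking the set of currently-live
+ // instructions: a result is added to `live` when it is produced (see `verifyInst`) and
+ // removed by the operand that Liveness claims kills it (see `verifyOperand`), so stale
+ // or duplicate deaths are caught immediately.
+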
+fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
+ const tag = self.air.instructions.items(.tag);
+ const data = self.air.instructions.items(.data);
+ for (body) |inst| {
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ // This instruction will not be lowered and should be ignored.
+ continue;
+ }
+
+ switch (tag[inst]) {
+ // no operands
+ .arg,
+ .alloc,
+ .ret_ptr,
+ .constant,
+ .const_ty,
+ .breakpoint,
+ .dbg_stmt,
+ .dbg_inline_begin,
+ .dbg_inline_end,
+ .dbg_block_begin,
+ .dbg_block_end,
+ .fence,
+ .ret_addr,
+ .frame_addr,
+ .wasm_memory_size,
+ .err_return_trace,
+ .save_err_return_trace_index,
+ .c_va_start,
+ .work_item_id,
+ .work_group_size,
+ .work_group_id,
+ => try self.verifyInst(inst, .{ .none, .none, .none }),
+
+ .trap, .unreach => {
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ // This instruction terminates the function, so everything should be dead
+ if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
+ },
+
+ // unary
+ .not,
+ .bitcast,
+ .load,
+ .fpext,
+ .fptrunc,
+ .intcast,
+ .trunc,
+ .optional_payload,
+ .optional_payload_ptr,
+ .optional_payload_ptr_set,
+ .errunion_payload_ptr_set,
+ .wrap_optional,
+ .unwrap_errunion_payload,
+ .unwrap_errunion_err,
+ .unwrap_errunion_payload_ptr,
+ .unwrap_errunion_err_ptr,
+ .wrap_errunion_payload,
+ .wrap_errunion_err,
+ .slice_ptr,
+ .slice_len,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
+ .struct_field_ptr_index_0,
+ .struct_field_ptr_index_1,
+ .struct_field_ptr_index_2,
+ .struct_field_ptr_index_3,
+ .array_to_slice,
+ .float_to_int,
+ .float_to_int_optimized,
+ .int_to_float,
+ .get_union_tag,
+ .clz,
+ .ctz,
+ .popcount,
+ .byte_swap,
+ .bit_reverse,
+ .splat,
+ .error_set_has_value,
+ .addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
+ => {
+ const ty_op = data[inst].ty_op;
+ try self.verifyInst(inst, .{ ty_op.operand, .none, .none });
+ },
+ .is_null,
+ .is_non_null,
+ .is_null_ptr,
+ .is_non_null_ptr,
+ .is_err,
+ .is_non_err,
+ .is_err_ptr,
+ .is_non_err_ptr,
+ .ptrtoint,
+ .bool_to_int,
+ .is_named_enum_value,
+ .tag_name,
+ .error_name,
+ .sqrt,
+ .sin,
+ .cos,
+ .tan,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ .neg,
+ .neg_optimized,
+ .cmp_lt_errors_len,
+ .set_err_return_trace,
+ .c_va_end,
+ => {
+ const un_op = data[inst].un_op;
+ try self.verifyInst(inst, .{ un_op, .none, .none });
+ },
+ .ret,
+ .ret_load,
+ => {
+ const un_op = data[inst].un_op;
+ try self.verifyInst(inst, .{ un_op, .none, .none });
+ // This instruction terminates the function, so everything should be dead
+ if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
+ },
+ .dbg_var_ptr,
+ .dbg_var_val,
+ .wasm_memory_grow,
+ => {
+ const pl_op = data[inst].pl_op;
+ try self.verifyInst(inst, .{ pl_op.operand, .none, .none });
+ },
+ .prefetch => {
+ const prefetch = data[inst].prefetch;
+ try self.verifyInst(inst, .{ prefetch.ptr, .none, .none });
+ },
+ .reduce,
+ .reduce_optimized,
+ => {
+ const reduce = data[inst].reduce;
+ try self.verifyInst(inst, .{ reduce.operand, .none, .none });
+ },
+ .union_init => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
+ try self.verifyInst(inst, .{ extra.init, .none, .none });
+ },
+ .struct_field_ptr, .struct_field_val => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
+ try self.verifyInst(inst, .{ extra.struct_operand, .none, .none });
+ },
+ .field_parent_ptr => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+ try self.verifyInst(inst, .{ extra.field_ptr, .none, .none });
+ },
+ .atomic_load => {
+ const atomic_load = data[inst].atomic_load;
+ try self.verifyInst(inst, .{ atomic_load.ptr, .none, .none });
+ },
+
+ // binary
+ .add,
+ .add_optimized,
+ .addwrap,
+ .addwrap_optimized,
+ .add_sat,
+ .sub,
+ .sub_optimized,
+ .subwrap,
+ .subwrap_optimized,
+ .sub_sat,
+ .mul,
+ .mul_optimized,
+ .mulwrap,
+ .mulwrap_optimized,
+ .mul_sat,
+ .div_float,
+ .div_float_optimized,
+ .div_trunc,
+ .div_trunc_optimized,
+ .div_floor,
+ .div_floor_optimized,
+ .div_exact,
+ .div_exact_optimized,
+ .rem,
+ .rem_optimized,
+ .mod,
+ .mod_optimized,
+ .bit_and,
+ .bit_or,
+ .xor,
+ .cmp_lt,
+ .cmp_lt_optimized,
+ .cmp_lte,
+ .cmp_lte_optimized,
+ .cmp_eq,
+ .cmp_eq_optimized,
+ .cmp_gte,
+ .cmp_gte_optimized,
+ .cmp_gt,
+ .cmp_gt_optimized,
+ .cmp_neq,
+ .cmp_neq_optimized,
+ .bool_and,
+ .bool_or,
+ .store,
+ .array_elem_val,
+ .slice_elem_val,
+ .ptr_elem_val,
+ .shl,
+ .shl_exact,
+ .shl_sat,
+ .shr,
+ .shr_exact,
+ .atomic_store_unordered,
+ .atomic_store_monotonic,
+ .atomic_store_release,
+ .atomic_store_seq_cst,
+ .set_union_tag,
+ .min,
+ .max,
+ => {
+ const bin_op = data[inst].bin_op;
+ try self.verifyInst(inst, .{ bin_op.lhs, bin_op.rhs, .none });
+ },
+ .add_with_overflow,
+ .sub_with_overflow,
+ .mul_with_overflow,
+ .shl_with_overflow,
+ .ptr_add,
+ .ptr_sub,
+ .ptr_elem_ptr,
+ .slice_elem_ptr,
+ .slice,
+ => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none });
+ },
+ .shuffle => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
+ try self.verifyInst(inst, .{ extra.a, extra.b, .none });
+ },
+ .cmp_vector,
+ .cmp_vector_optimized,
+ => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data;
+ try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none });
+ },
+ .atomic_rmw => {
+ const pl_op = data[inst].pl_op;
+ const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+ try self.verifyInst(inst, .{ pl_op.operand, extra.operand, .none });
+ },
+
+ // ternary
+ .select => {
+ const pl_op = data[inst].pl_op;
+ const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
+ try self.verifyInst(inst, .{ pl_op.operand, extra.lhs, extra.rhs });
+ },
+ .mul_add => {
+ const pl_op = data[inst].pl_op;
+ const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
+ try self.verifyInst(inst, .{ extra.lhs, extra.rhs, pl_op.operand });
+ },
+ .vector_store_elem => {
+ const vector_store_elem = data[inst].vector_store_elem;
+ const extra = self.air.extraData(Air.Bin, vector_store_elem.payload).data;
+ try self.verifyInst(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs });
+ },
+ .memset,
+ .memcpy,
+ => {
+ const pl_op = data[inst].pl_op;
+ const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
+ try self.verifyInst(inst, .{ pl_op.operand, extra.lhs, extra.rhs });
+ },
+ .cmpxchg_strong,
+ .cmpxchg_weak,
+ => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
+ try self.verifyInst(inst, .{ extra.ptr, extra.expected_value, extra.new_value });
+ },
+
+ // big tombs
+ .aggregate_init => {
+ const ty_pl = data[inst].ty_pl;
+ const aggregate_ty = self.air.getRefType(ty_pl.ty);
+ const len = @intCast(usize, aggregate_ty.arrayLen());
+ const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+
+ var bt = self.liveness.iterateBigTomb(inst);
+ for (elements) |element| {
+ try self.verifyOperand(inst, element, bt.feed());
+ }
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+ .call, .call_always_tail, .call_never_tail, .call_never_inline => {
+ const pl_op = data[inst].pl_op;
+ const extra = self.air.extraData(Air.Call, pl_op.payload);
+ const args = @ptrCast(
+ []const Air.Inst.Ref,
+ self.air.extra[extra.end..][0..extra.data.args_len],
+ );
+
+ var bt = self.liveness.iterateBigTomb(inst);
+ try self.verifyOperand(inst, pl_op.operand, bt.feed());
+ for (args) |arg| {
+ try self.verifyOperand(inst, arg, bt.feed());
+ }
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+ .assembly => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.Asm, ty_pl.payload);
+ var extra_i = extra.end;
+ const outputs = @ptrCast(
+ []const Air.Inst.Ref,
+ self.air.extra[extra_i..][0..extra.data.outputs_len],
+ );
+ extra_i += outputs.len;
+ const inputs = @ptrCast(
+ []const Air.Inst.Ref,
+ self.air.extra[extra_i..][0..extra.data.inputs_len],
+ );
+ extra_i += inputs.len;
+
+ var bt = self.liveness.iterateBigTomb(inst);
+ for (outputs) |output| {
+ if (output != .none) {
+ try self.verifyOperand(inst, output, bt.feed());
+ }
+ }
+ for (inputs) |input| {
+ try self.verifyOperand(inst, input, bt.feed());
+ }
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+
+ // control flow
+ .@"try" => {
+ const pl_op = data[inst].pl_op;
+ const extra = self.air.extraData(Air.Try, pl_op.payload);
+ const try_body = self.air.extra[extra.end..][0..extra.data.body_len];
+
+ const cond_br_liveness = self.liveness.getCondBr(inst);
+
+ try self.verifyOperand(inst, pl_op.operand, self.liveness.operandDies(inst, 0));
+
+ var live = try self.live.clone(self.gpa);
+ defer live.deinit(self.gpa);
+
+ for (cond_br_liveness.else_deaths) |death| try self.verifyDeath(inst, death);
+ try self.verifyBody(try_body);
+
+ self.live.deinit(self.gpa);
+ self.live = live.move();
+
+ for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death);
+
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+ .try_ptr => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
+ const try_body = self.air.extra[extra.end..][0..extra.data.body_len];
+
+ const cond_br_liveness = self.liveness.getCondBr(inst);
+
+ try self.verifyOperand(inst, extra.data.ptr, self.liveness.operandDies(inst, 0));
+
+ var live = try self.live.clone(self.gpa);
+ defer live.deinit(self.gpa);
+
+ for (cond_br_liveness.else_deaths) |death| try self.verifyDeath(inst, death);
+ try self.verifyBody(try_body);
+
+ self.live.deinit(self.gpa);
+ self.live = live.move();
+
+ for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death);
+
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+ .br => {
+ const br = data[inst].br;
+ const gop = try self.blocks.getOrPut(self.gpa, br.block_inst);
+
+ try self.verifyOperand(inst, br.operand, self.liveness.operandDies(inst, 0));
+ if (gop.found_existing) {
+ try self.verifyMatchingLiveness(br.block_inst, gop.value_ptr.*);
+ } else {
+ gop.value_ptr.* = try self.live.clone(self.gpa);
+ }
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+ .block => {
+ const ty_pl = data[inst].ty_pl;
+ const block_ty = self.air.getRefType(ty_pl.ty);
+ const extra = self.air.extraData(Air.Block, ty_pl.payload);
+ const block_body = self.air.extra[extra.end..][0..extra.data.body_len];
+ const block_liveness = self.liveness.getBlock(inst);
+
+ var orig_live = try self.live.clone(self.gpa);
+ defer orig_live.deinit(self.gpa);
+
+ assert(!self.blocks.contains(inst));
+ try self.verifyBody(block_body);
+
+ // Liveness data after the block body is garbage, but we want to
+ // restore it to verify deaths
+ self.live.deinit(self.gpa);
+ self.live = orig_live.move();
+
+ for (block_liveness.deaths) |death| try self.verifyDeath(inst, death);
+
+ if (block_ty.isNoReturn()) {
+ assert(!self.blocks.contains(inst));
+ } else {
+ var live = self.blocks.fetchRemove(inst).?.value;
+ defer live.deinit(self.gpa);
+
+ try self.verifyMatchingLiveness(inst, live);
+ }
+
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+ .loop => {
+ const ty_pl = data[inst].ty_pl;
+ const extra = self.air.extraData(Air.Block, ty_pl.payload);
+ const loop_body = self.air.extra[extra.end..][0..extra.data.body_len];
+
+ var live = try self.live.clone(self.gpa);
+ defer live.deinit(self.gpa);
+
+ try self.verifyBody(loop_body);
+
+ // The same instructions should be alive after the loop as before it
+ try self.verifyMatchingLiveness(inst, live);
+
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+ .cond_br => {
+ const pl_op = data[inst].pl_op;
+ const extra = self.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
+ const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
+ const cond_br_liveness = self.liveness.getCondBr(inst);
+
+ try self.verifyOperand(inst, pl_op.operand, self.liveness.operandDies(inst, 0));
+
+ var live = try self.live.clone(self.gpa);
+ defer live.deinit(self.gpa);
+
+ for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death);
+ try self.verifyBody(then_body);
+
+ self.live.deinit(self.gpa);
+ self.live = live.move();
+
+ for (cond_br_liveness.else_deaths) |death| try self.verifyDeath(inst, death);
+ try self.verifyBody(else_body);
+
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+ .switch_br => {
+ const pl_op = data[inst].pl_op;
+ const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
+ var extra_index = switch_br.end;
+ var case_i: u32 = 0;
+ const switch_br_liveness = try self.liveness.getSwitchBr(
+ self.gpa,
+ inst,
+ switch_br.data.cases_len + 1,
+ );
+ defer self.gpa.free(switch_br_liveness.deaths);
+
+ try self.verifyOperand(inst, pl_op.operand, self.liveness.operandDies(inst, 0));
+
+ var live = self.live.move();
+ defer live.deinit(self.gpa);
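+ // Each case starts from the same pre-branch live set, so keep the original here and
+ // hand each body a fresh clone.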
+
+ while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+ const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items = @ptrCast(
+ []const Air.Inst.Ref,
+ self.air.extra[case.end..][0..case.data.items_len],
+ );
+ const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
+ extra_index = case.end + items.len + case_body.len;
+
+ self.live.deinit(self.gpa);
+ self.live = try live.clone(self.gpa);
+
+ for (switch_br_liveness.deaths[case_i]) |death| try self.verifyDeath(inst, death);
+ try self.verifyBody(case_body);
+ }
+
+ const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
+ if (else_body.len > 0) {
+ self.live.deinit(self.gpa);
+ self.live = try live.clone(self.gpa);
+
+ for (switch_br_liveness.deaths[case_i]) |death| try self.verifyDeath(inst, death);
+ try self.verifyBody(else_body);
+ }
+
+ try self.verifyInst(inst, .{ .none, .none, .none });
+ },
+ }
+ }
+}
+
+fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Error!void {
+ try self.verifyOperand(inst, Air.indexToRef(operand), true);
+}
+
+fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void {
+ const operand = Air.refToIndex(op_ref) orelse return;
+ switch (self.air.instructions.items(.tag)[operand]) {
+ .constant, .const_ty => {},
+ else => {
+ if (dies) {
+ if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
+ } else {
+ if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
+ }
+ },
+ }
+}
+
+fn verifyInst(
+ self: *Verify,
+ inst: Air.Inst.Index,
+ operands: [Liveness.bpi - 1]Air.Inst.Ref,
+) Error!void {
+ for (operands, 0..) |operand, operand_index| {
+ const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index));
+ try self.verifyOperand(inst, operand, dies);
+ }
+ const tag = self.air.instructions.items(.tag);
+ switch (tag[inst]) {
+ .constant, .const_ty => unreachable,
+ else => {
+ if (self.liveness.isUnused(inst)) {
+ assert(!self.live.contains(inst));
+ } else {
+ try self.live.putNoClobber(self.gpa, inst, {});
+ }
+ },
+ }
+}
+
+fn verifyMatchingLiveness(self: *Verify, block: Air.Inst.Index, live: LiveMap) Error!void {
+ if (self.live.count() != live.count()) return invalid("%{}: different deaths across branches", .{block});
+ var live_it = self.live.keyIterator();
+ while (live_it.next()) |live_inst| if (!live.contains(live_inst.*)) return invalid("%{}: different deaths across branches", .{block});
+}
+
+fn invalid(comptime fmt: []const u8, args: anytype) error{LivenessInvalid} {
+ log.err(fmt, args);
+ return error.LivenessInvalid;
+}
+
+const std = @import("std");
+const assert = std.debug.assert;
+const log = std.log.scoped(.liveness_verify);
+
+const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");
+const Verify = @This();
diff --git a/src/Module.zig b/src/Module.zig
index c3dbc396df..fa91e8c1ed 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -483,6 +483,8 @@ pub const Decl = struct {
/// and attempting semantic analysis again may succeed.
sema_failure_retryable,
/// There will be a corresponding ErrorMsg in Module.failed_decls.
+ liveness_failure,
+ /// There will be a corresponding ErrorMsg in Module.failed_decls.
codegen_failure,
/// There will be a corresponding ErrorMsg in Module.failed_decls.
/// This indicates the failure was something like running out of disk space,
@@ -4129,6 +4131,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
.file_failure,
.sema_failure,
.sema_failure_retryable,
+ .liveness_failure,
.codegen_failure,
.dependency_failure,
.codegen_failure_retryable,
@@ -4222,6 +4225,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
.dependency_failure,
.sema_failure,
.sema_failure_retryable,
+ .liveness_failure,
.codegen_failure,
.codegen_failure_retryable,
.complete,
@@ -4247,6 +4251,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
.file_failure,
.sema_failure,
+ .liveness_failure,
.codegen_failure,
.dependency_failure,
.sema_failure_retryable,
@@ -4306,6 +4311,33 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
std.debug.print("# End Function AIR: {s}\n\n", .{fqn});
}
+ if (std.debug.runtime_safety) {
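+ // Verification only runs in runtime-safety builds of the compiler itself; a failure
+ // surfaces as an error on the declaration rather than as a miscompilation.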
+ var verify = Liveness.Verify{
+ .gpa = gpa,
+ .air = air,
+ .liveness = liveness,
+ };
+ defer verify.deinit();
+
+ verify.verify() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => {
+ try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
+ mod.failed_decls.putAssumeCapacityNoClobber(
+ decl_index,
+ try Module.ErrorMsg.create(
+ gpa,
+ decl.srcLoc(),
+ "invalid liveness: {s}",
+ .{@errorName(err)},
+ ),
+ );
+ decl.analysis = .liveness_failure;
+ return error.AnalysisFail;
+ },
+ };
+ }
+
if (no_bin_file and !dump_llvm_ir) return;
comp.bin_file.updateFunc(mod, func, air, liveness) catch |err| switch (err) {
@@ -4349,6 +4381,7 @@ pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void {
.dependency_failure,
.sema_failure,
.sema_failure_retryable,
+ .liveness_failure,
.codegen_failure,
.codegen_failure_retryable,
.complete,
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index e08386dfcb..1acc11d7e8 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -655,6 +655,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const air_tags = self.air.instructions.items(.tag);
for (body) |inst| {
+ // TODO: remove now-redundant isUnused calls from AIR handler functions
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ continue;
+ }
+
const old_air_bookkeeping = self.air_bookkeeping;
try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -5000,17 +5005,11 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
- const liveness_loop = self.liveness.getLoop(inst);
const start_index = @intCast(u32, self.mir_instructions.len);
try self.genBody(body);
try self.jump(start_index);
- try self.ensureProcessDeathCapacity(liveness_loop.deaths.len);
- for (liveness_loop.deaths) |operand| {
- self.processDeath(operand);
- }
-
return self.finishAirBookkeeping();
}
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 1acf5a5164..661e713b1c 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -639,6 +639,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const air_tags = self.air.instructions.items(.tag);
for (body) |inst| {
+ // TODO: remove now-redundant isUnused calls from AIR handler functions
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ continue;
+ }
+
const old_air_bookkeeping = self.air_bookkeeping;
try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -4923,17 +4928,11 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
- const liveness_loop = self.liveness.getLoop(inst);
const start_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
try self.genBody(body);
try self.jump(start_index);
- try self.ensureProcessDeathCapacity(liveness_loop.deaths.len);
- for (liveness_loop.deaths) |operand| {
- self.processDeath(operand);
- }
-
return self.finishAirBookkeeping();
}
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 16d9548da7..f0ab8b3317 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -473,6 +473,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const air_tags = self.air.instructions.items(.tag);
for (body) |inst| {
+ // TODO: remove now-redundant isUnused calls from AIR handler functions
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ continue;
+ }
+
const old_air_bookkeeping = self.air_bookkeeping;
try self.ensureProcessDeathCapacity(Liveness.bpi);
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 5405b212a2..2bcc1e1c4e 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -489,6 +489,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const air_tags = self.air.instructions.items(.tag);
for (body) |inst| {
+ // TODO: remove now-redundant isUnused calls from AIR handler functions
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ continue;
+ }
+
const old_air_bookkeeping = self.air_bookkeeping;
try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -1750,17 +1755,11 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end .. loop.end + loop.data.body_len];
- const liveness_loop = self.liveness.getLoop(inst);
const start = @intCast(u32, self.mir_instructions.len);
try self.genBody(body);
try self.jump(start);
- try self.ensureProcessDeathCapacity(liveness_loop.deaths.len);
- for (liveness_loop.deaths) |operand| {
- self.processDeath(operand);
- }
-
return self.finishAirBookkeeping();
}
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 83af640b82..b94d3993f9 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -2009,9 +2009,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
for (body) |inst| {
+ if (func.liveness.isUnused(inst) and !func.air.mustLower(inst)) {
+ continue;
+ }
const old_bookkeeping_value = func.air_bookkeeping;
- // TODO: Determine why we need to pre-allocate an extra 4 possible values here.
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, Liveness.bpi + 4);
+ try func.currentBranch().values.ensureUnusedCapacity(func.gpa, Liveness.bpi);
try func.genInst(inst);
if (builtin.mode == .Debug and func.air_bookkeeping < old_bookkeeping_value + 1) {
@@ -2185,7 +2187,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
const result_value = result_value: {
- if (func.liveness.isUnused(inst) or (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError())) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
break :result_value WValue{ .none = {} };
} else if (ret_ty.isNoReturn()) {
try func.addTag(.@"unreachable");
@@ -2494,7 +2496,6 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
const ty = func.air.typeOf(bin_op.lhs);
@@ -2649,7 +2650,6 @@ const FloatOp = enum {
fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
const operand = try func.resolveInst(un_op);
const ty = func.air.typeOf(un_op);
@@ -2723,7 +2723,6 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -3183,7 +3182,6 @@ fn airLoop(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const loop = func.air.extraData(Air.Block, ty_pl.payload);
const body = func.air.extra[loop.end..][0..loop.data.body_len];
- const liveness_loop = func.liveness.getLoop(inst);
// result type of loop is always 'noreturn', meaning we can always
// emit the wasm type 'block_empty'.
@@ -3194,11 +3192,6 @@ fn airLoop(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addLabel(.br, 0);
try func.endBlock();
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_loop.deaths.len));
- for (liveness_loop.deaths) |death| {
- func.processDeath(Air.indexToRef(death));
- }
-
func.finishAir(inst, .none, &.{});
}
@@ -3224,9 +3217,6 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
func.branches.appendAssumeCapacity(.{});
try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len));
- for (liveness_condbr.else_deaths) |death| {
- func.processDeath(Air.indexToRef(death));
- }
try func.genBody(else_body);
try func.endBlock();
var else_stack = func.branches.pop();
@@ -3235,9 +3225,6 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// Outer block that matches the condition
func.branches.appendAssumeCapacity(.{});
try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len));
- for (liveness_condbr.then_deaths) |death| {
- func.processDeath(Air.indexToRef(death));
- }
try func.genBody(then_body);
var then_stack = func.branches.pop();
defer then_stack.deinit(func.gpa);
@@ -3255,7 +3242,7 @@ fn mergeBranch(func: *CodeGen, branch: *const Branch) !void {
const target_keys = target_slice.items(.key);
const target_values = target_slice.items(.value);
- try parent.values.ensureUnusedCapacity(func.gpa, branch.values.count());
+ try parent.values.ensureTotalCapacity(func.gpa, parent.values.capacity() + branch.values.count());
for (target_keys, 0..) |key, index| {
// TODO: process deaths from branches
parent.values.putAssumeCapacity(key, target_values[index]);
@@ -3264,7 +3251,6 @@ fn mergeBranch(func: *CodeGen, branch: *const Branch) !void {
fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -3381,7 +3367,6 @@ fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.air.typeOf(ty_op.operand);
@@ -3447,7 +3432,7 @@ fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const result = if (!func.liveness.isUnused(inst)) result: {
+ const result = result: {
const operand = try func.resolveInst(ty_op.operand);
const wanted_ty = func.air.typeOfIndex(inst);
const given_ty = func.air.typeOf(ty_op.operand);
@@ -3456,7 +3441,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try bitcast_result.toLocal(func, wanted_ty);
}
break :result func.reuseOperand(ty_op.operand, operand);
- } else WValue{ .none = {} };
+ };
func.finishAir(inst, result, &.{ty_op.operand});
}
@@ -3480,7 +3465,6 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.StructField, ty_pl.payload);
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{extra.data.struct_operand});
const struct_ptr = try func.resolveInst(extra.data.struct_operand);
const struct_ty = func.air.typeOf(extra.data.struct_operand).childType();
@@ -3490,7 +3474,6 @@ fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const struct_ptr = try func.resolveInst(ty_op.operand);
const struct_ty = func.air.typeOf(ty_op.operand).childType();
@@ -3535,7 +3518,6 @@ fn structFieldPtr(
fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
const struct_ty = func.air.typeOf(struct_field.struct_operand);
const operand = try func.resolveInst(struct_field.struct_operand);
@@ -3801,7 +3783,6 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
const operand = try func.resolveInst(un_op);
const err_union_ty = func.air.typeOf(un_op);
const pl_ty = err_union_ty.errorUnionPayload();
@@ -3836,7 +3817,6 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.air.typeOf(ty_op.operand);
@@ -3859,7 +3839,6 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.air.typeOf(ty_op.operand);
@@ -3883,7 +3862,6 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const err_ty = func.air.typeOfIndex(inst);
@@ -3910,7 +3888,6 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const err_ty = func.air.getRefType(ty_op.ty);
@@ -3937,7 +3914,6 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const ty = func.air.getRefType(ty_op.ty);
const operand = try func.resolveInst(ty_op.operand);
@@ -4004,7 +3980,6 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
const operand = try func.resolveInst(un_op);
const op_ty = func.air.typeOf(un_op);
@@ -4049,7 +4024,7 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const opt_ty = func.air.typeOf(ty_op.operand);
const payload_ty = func.air.typeOfIndex(inst);
- if (func.liveness.isUnused(inst) or !payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
return func.finishAir(inst, .none, &.{ty_op.operand});
}
@@ -4069,7 +4044,6 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const opt_ty = func.air.typeOf(ty_op.operand).childType();
@@ -4114,7 +4088,6 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const payload_ty = func.air.typeOf(ty_op.operand);
const result = result: {
@@ -4153,7 +4126,6 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -4168,7 +4140,6 @@ fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const len = try func.load(operand, Type.usize, func.ptrSize());
@@ -4178,7 +4149,6 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const slice_ty = func.air.typeOf(bin_op.lhs);
const slice = try func.resolveInst(bin_op.lhs);
@@ -4209,7 +4179,6 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const elem_ty = func.air.getRefType(ty_pl.ty).childType();
const elem_size = elem_ty.abiSize(func.target);
@@ -4232,7 +4201,6 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSlicePtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const ptr = try func.load(operand, Type.usize, 0);
const result = try ptr.toLocal(func, Type.usize);
@@ -4241,7 +4209,6 @@ fn airSlicePtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const wanted_ty = func.air.getRefType(ty_op.ty);
@@ -4270,19 +4237,14 @@ fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) Inner
fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
- const result = if (func.liveness.isUnused(inst))
- WValue{ .none = {} }
- else result: {
- const operand = try func.resolveInst(un_op);
- break :result func.reuseOperand(un_op, operand);
- };
+ const operand = try func.resolveInst(un_op);
+ const result = func.reuseOperand(un_op, operand);
func.finishAir(inst, result, &.{un_op});
}
fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const array_ty = func.air.typeOf(ty_op.operand).childType();
@@ -4305,7 +4267,6 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
const operand = try func.resolveInst(un_op);
const result = switch (operand) {
@@ -4318,7 +4279,6 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const ptr_ty = func.air.typeOf(bin_op.lhs);
const ptr = try func.resolveInst(bin_op.lhs);
@@ -4356,7 +4316,6 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const ptr_ty = func.air.typeOf(bin_op.lhs);
const elem_ty = func.air.getRefType(ty_pl.ty).childType();
@@ -4386,7 +4345,6 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const ptr = try func.resolveInst(bin_op.lhs);
const offset = try func.resolveInst(bin_op.rhs);
@@ -4510,7 +4468,6 @@ fn memset(func: *CodeGen, ptr: WValue, len: WValue, value: WValue) InnerError!vo
fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const array_ty = func.air.typeOf(bin_op.lhs);
const array = try func.resolveInst(bin_op.lhs);
@@ -4579,7 +4536,6 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const dest_ty = func.air.typeOfIndex(inst);
@@ -4604,7 +4560,6 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const dest_ty = func.air.typeOfIndex(inst);
@@ -4719,10 +4674,6 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const child_ty = inst_ty.childType();
const elem_size = child_ty.abiSize(func.target);
- if (func.liveness.isUnused(inst)) {
- return func.finishAir(inst, .none, &.{ extra.a, extra.b });
- }
-
const module = func.bin_file.base.options.module.?;
// TODO: One of them could be by ref; handle in loop
if (isByRef(func.air.typeOf(extra.a), func.target) or isByRef(inst_ty, func.target)) {
@@ -4788,7 +4739,6 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
const result: WValue = result_value: {
- if (func.liveness.isUnused(inst)) break :result_value WValue.none;
switch (result_ty.zigTypeTag()) {
.Array => {
const result = try func.allocStack(result_ty);
@@ -4894,7 +4844,6 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{extra.init});
const result = result: {
const union_ty = func.air.typeOfIndex(inst);
@@ -4933,7 +4882,6 @@ fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{pl_op.operand});
const result = try func.allocLocal(func.air.typeOfIndex(inst));
try func.addLabel(.memory_size, pl_op.payload);
@@ -4943,7 +4891,6 @@ fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{pl_op.operand});
const operand = try func.resolveInst(pl_op.operand);
const result = try func.allocLocal(func.air.typeOfIndex(inst));
@@ -5055,7 +5002,6 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const un_ty = func.air.typeOf(ty_op.operand);
const tag_ty = func.air.typeOfIndex(inst);
@@ -5075,7 +5021,6 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const dest_ty = func.air.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
@@ -5121,7 +5066,6 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const dest_ty = func.air.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
@@ -5162,7 +5106,6 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const err_set_ty = func.air.typeOf(ty_op.operand).childType();
const payload_ty = err_set_ty.errorUnionPayload();
@@ -5177,8 +5120,6 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
);
const result = result: {
- if (func.liveness.isUnused(inst)) break :result WValue{ .none = {} };
-
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
break :result func.reuseOperand(ty_op.operand, operand);
}
@@ -5191,7 +5132,6 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{extra.field_ptr});
const field_ptr = try func.resolveInst(extra.field_ptr);
const parent_ty = func.air.getRefType(ty_pl.ty).childType();
@@ -5231,7 +5171,6 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.air.typeOf(ty_op.operand);
@@ -5276,7 +5215,6 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
const operand = try func.resolveInst(un_op);
// First retrieve the symbol index to the error name table
@@ -5318,7 +5256,6 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPtrSliceFieldPtr(func: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const slice_ptr = try func.resolveInst(ty_op.operand);
const result = try func.buildPointerOffset(slice_ptr, offset, .new);
func.finishAir(inst, result, &.{ty_op.operand});
@@ -5328,7 +5265,6 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
assert(op == .add or op == .sub);
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ extra.lhs, extra.rhs });
const lhs_op = try func.resolveInst(extra.lhs);
const rhs_op = try func.resolveInst(extra.rhs);
@@ -5471,7 +5407,6 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ extra.lhs, extra.rhs });
const lhs = try func.resolveInst(extra.lhs);
const rhs = try func.resolveInst(extra.rhs);
@@ -5519,7 +5454,6 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ extra.lhs, extra.rhs });
const lhs = try func.resolveInst(extra.lhs);
const rhs = try func.resolveInst(extra.rhs);
@@ -5605,7 +5539,6 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const ty = func.air.typeOfIndex(inst);
if (ty.zigTypeTag() == .Vector) {
@@ -5637,8 +5570,6 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
- if (func.liveness.isUnused(inst))
- return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
const ty = func.air.typeOfIndex(inst);
if (ty.zigTypeTag() == .Vector) {
@@ -5671,7 +5602,6 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const ty = func.air.typeOf(ty_op.operand);
const result_ty = func.air.typeOfIndex(inst);
@@ -5724,7 +5654,6 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const ty = func.air.typeOf(ty_op.operand);
const result_ty = func.air.typeOfIndex(inst);
@@ -5892,7 +5821,6 @@ fn lowerTry(
fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
const ty = func.air.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
@@ -5963,7 +5891,6 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
@@ -5978,7 +5905,6 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
@@ -6127,7 +6053,6 @@ fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .add or op == .sub);
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
@@ -6240,7 +6165,6 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type,
fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const ty = func.air.typeOfIndex(inst);
const int_info = ty.intInfo(func.target);
@@ -6399,7 +6323,6 @@ fn callIntrinsic(
fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
- if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
const operand = try func.resolveInst(un_op);
const enum_ty = func.air.typeOf(un_op);
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 0a59389839..1c72e2296b 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -79,14 +79,8 @@ end_di_column: u32,
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
-/// Whenever there is a runtime branch, we push a Branch onto this stack,
-/// and pop it off when the runtime branch joins. This provides an "overlay"
-/// of the table of mappings from instructions to `MCValue` from within the branch.
-/// This way we can modify the `MCValue` for an instruction in different ways
-/// within different branches. Special consideration is needed when a branch
-/// joins with its parent, to make sure all instructions have the same MCValue
-/// across each runtime branch upon joining.
-branch_stack: *std.ArrayList(Branch),
+const_tracking: InstTrackingMap = .{},
+inst_tracking: InstTrackingMap = .{},
// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
@@ -95,6 +89,9 @@ register_manager: RegisterManager = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
+/// Generation of the current scope; increments by 1 for every entered scope.
+scope_generation: u32 = 0,
+
/// Offset from the stack base, representing the end of the stack frame.
max_end_stack: u32 = 0,
/// Represents the current end stack offset. If there is no existing slot
@@ -105,10 +102,12 @@ next_stack_offset: u32 = 0,
air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init,
/// For mir debug info, maps a mir index to an air index
-mir_to_air_map: if (builtin.mode == .Debug) std.AutoHashMap(Mir.Inst.Index, Air.Inst.Index) else void,
+mir_to_air_map: @TypeOf(mir_to_air_map_init) = mir_to_air_map_init,
const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};
+const mir_to_air_map_init = if (builtin.mode == .Debug) std.AutoHashMapUnmanaged(Mir.Inst.Index, Air.Inst.Index){} else {};
+
pub const MCValue = union(enum) {
/// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
/// TODO Look into deleting this tag and using `dead` instead, since every use
@@ -117,7 +116,8 @@ pub const MCValue = union(enum) {
/// Control flow will not allow this value to be observed.
unreach,
/// No more references to this value remain.
- dead,
+ /// The payload is the value of scope_generation at the point where the death occurred.
+ dead: u32,
/// The value is undefined.
undef,
/// A pointer-sized integer that fits in a register.
@@ -183,47 +183,95 @@ pub const MCValue = union(enum) {
}
};
-const Branch = struct {
- inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
+const InstTrackingMap = std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InstTracking);
+const InstTracking = struct {
+ long: MCValue,
+ short: MCValue,
- fn deinit(self: *Branch, gpa: Allocator) void {
- self.inst_table.deinit(gpa);
- self.* = undefined;
+ fn init(result: MCValue) InstTracking {
+ return .{ .long = result, .short = result };
}
- const FormatContext = struct {
- insts: []const Air.Inst.Index,
- mcvs: []const MCValue,
- };
+ fn getReg(self: InstTracking) ?Register {
+ return switch (self.short) {
+ .register => |reg| reg,
+ .register_overflow => |ro| ro.reg,
+ else => null,
+ };
+ }
- fn fmt(
- ctx: FormatContext,
- comptime unused_format_string: []const u8,
- options: std.fmt.FormatOptions,
- writer: anytype,
- ) @TypeOf(writer).Error!void {
- _ = options;
- comptime assert(unused_format_string.len == 0);
- try writer.writeAll("Branch {\n");
- for (ctx.insts, ctx.mcvs) |inst, mcv| {
- try writer.print(" %{d} => {}\n", .{ inst, mcv });
+ fn getCondition(self: InstTracking) ?Condition {
+ return switch (self.short) {
+ .eflags => |eflags| eflags,
+ .register_overflow => |ro| ro.eflags,
+ else => null,
+ };
+ }
+
+ fn spill(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void {
+ switch (self.long) {
+ .none,
+ .dead,
+ .unreach,
+ => unreachable,
+ .register,
+ .register_overflow,
+ .eflags,
+ => self.long = try function.allocRegOrMem(inst, self.short == .eflags),
+ .stack_offset => {},
+ .undef,
+ .immediate,
+ .memory,
+ .load_direct,
+ .lea_direct,
+ .load_got,
+ .load_tlv,
+ .lea_tlv,
+ .ptr_stack_offset,
+ => return, // these can be rematerialized without using a stack slot
+ }
+ log.debug("spilling %{d} from {} to {}", .{ inst, self.short, self.long });
+ const ty = function.air.typeOfIndex(inst);
+ try function.setRegOrMem(ty, self.long, self.short);
+ }
+
+ fn trackSpill(self: *InstTracking, function: *Self) void {
+ if (self.getReg()) |reg| function.register_manager.freeReg(reg);
+ switch (self.short) {
+ .none, .dead, .unreach => unreachable,
+ else => {},
}
- try writer.writeAll("}");
+ self.short = self.long;
}
- fn format(branch: Branch, comptime unused_format_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
- _ = branch;
- _ = unused_format_string;
- _ = options;
- _ = writer;
- @compileError("do not format Branch directly; use ty.fmtDebug()");
+ fn materialize(self: *InstTracking, function: *Self, inst: Air.Inst.Index, reg: Register) !void {
+ const ty = function.air.typeOfIndex(inst);
+ try function.genSetReg(ty, reg, self.long);
}
- fn fmtDebug(self: @This()) std.fmt.Formatter(fmt) {
- return .{ .data = .{
- .insts = self.inst_table.keys(),
- .mcvs = self.inst_table.values(),
- } };
+ fn trackMaterialize(self: *InstTracking, function: *Self, inst: Air.Inst.Index, reg: Register) void {
+ assert(inst == function.register_manager.registers[
+ RegisterManager.indexOfRegIntoTracked(reg).?
+ ]);
+ self.short = .{ .register = reg };
+ }
+
+ fn resurrect(self: *InstTracking, scope_generation: u32) void {
+ switch (self.short) {
+ .dead => |die_generation| if (die_generation >= scope_generation) {
+ self.short = self.long;
+ },
+ else => {},
+ }
+ }
+
+ fn die(self: *InstTracking, function: *Self) void {
+ function.freeValue(self.short);
+ self.reuse(function);
+ }
+
+ fn reuse(self: *InstTracking, function: *Self) void {
+ self.short = .{ .dead = function.scope_generation };
}
};
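The long/short split above is the heart of the new scheme: short is where the value lives right now, long is a location that survives control flow, and a death records the scope_generation it happened in so that resurrect can undo deaths from a branch that is being rolled back. A simplified, self-contained model (hypothetical types, not the backend's):

    const std = @import("std");

    const Toy = struct {
        long: u32, // persistent home, e.g. a stack slot
        short: union(enum) { at: u32, dead: u32 }, // current location, or death generation

        fn die(self: *Toy, generation: u32) void {
            self.short = .{ .dead = generation };
        }

        fn resurrect(self: *Toy, scope_generation: u32) void {
            switch (self.short) {
                // Only deaths from scopes at or newer than the restored
                // generation are undone; deaths in enclosing scopes stay dead.
                .dead => |die_generation| if (die_generation >= scope_generation) {
                    self.short = .{ .at = self.long };
                },
                .at => {},
            }
        }
    };

    test "resurrect undoes only inner-scope deaths" {
        var died_before = Toy{ .long = 8, .short = .{ .at = 8 } };
        var died_inside = Toy{ .long = 16, .short = .{ .at = 16 } };
        died_before.die(1); // death in the enclosing scope, generation 1
        died_inside.die(2); // death inside the branch body, generation 2
        const branch_generation: u32 = 2;
        died_before.resurrect(branch_generation);
        died_inside.resurrect(branch_generation);
        try std.testing.expect(died_before.short == .dead);
        try std.testing.expect(died_inside.short == .at);
    }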
@@ -235,39 +283,14 @@ const StackAllocation = struct {
const BlockData = struct {
relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
- branch: Branch = .{},
- branch_depth: u32,
+ state: State,
fn deinit(self: *BlockData, gpa: Allocator) void {
- self.branch.deinit(gpa);
self.relocs.deinit(gpa);
self.* = undefined;
}
};
-const BigTomb = struct {
- function: *Self,
- inst: Air.Inst.Index,
- lbt: Liveness.BigTomb,
-
- fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
- const dies = bt.lbt.feed();
- const op_index = Air.refToIndex(op_ref) orelse return;
- if (!dies) return;
- bt.function.processDeath(op_index);
- }
-
- fn finishAir(bt: *BigTomb, result: MCValue) void {
- const is_used = !bt.function.liveness.isUnused(bt.inst);
- if (is_used) {
- log.debug(" (saving %{d} => {})", .{ bt.inst, result });
- const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result);
- }
- bt.function.finishAirBookkeeping();
- }
-};
-
const Self = @This();
pub fn generate(
@@ -294,19 +317,9 @@ pub fn generate(
stderr.writeAll(":\n") catch {};
}
- var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
- try branch_stack.ensureUnusedCapacity(2);
- // The outermost branch is used for constants only.
- branch_stack.appendAssumeCapacity(.{});
- branch_stack.appendAssumeCapacity(.{});
- defer {
- assert(branch_stack.items.len == 2);
- for (branch_stack.items) |*branch| branch.deinit(bin_file.allocator);
- branch_stack.deinit();
- }
-
+ const gpa = bin_file.allocator;
var function = Self{
- .gpa = bin_file.allocator,
+ .gpa = gpa,
.air = air,
.liveness = liveness,
.target = &bin_file.options.target,
@@ -318,21 +331,23 @@ pub fn generate(
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = fn_type,
.arg_index = 0,
- .branch_stack = &branch_stack,
.src_loc = src_loc,
.stack_align = undefined,
.end_di_line = module_fn.rbrace_line,
.end_di_column = module_fn.rbrace_column,
- .mir_to_air_map = if (builtin.mode == .Debug)
- std.AutoHashMap(Mir.Inst.Index, Air.Inst.Index).init(bin_file.allocator)
- else {},
};
- defer function.stack.deinit(bin_file.allocator);
- defer function.blocks.deinit(bin_file.allocator);
- defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
- defer function.mir_instructions.deinit(bin_file.allocator);
- defer function.mir_extra.deinit(bin_file.allocator);
- defer if (builtin.mode == .Debug) function.mir_to_air_map.deinit();
+ defer {
+ function.stack.deinit(gpa);
+ var block_it = function.blocks.valueIterator();
+ while (block_it.next()) |block| block.deinit(gpa);
+ function.blocks.deinit(gpa);
+ function.inst_tracking.deinit(gpa);
+ function.const_tracking.deinit(gpa);
+ function.exitlude_jump_relocs.deinit(gpa);
+ function.mir_instructions.deinit(gpa);
+ function.mir_extra.deinit(gpa);
+ if (builtin.mode == .Debug) function.mir_to_air_map.deinit(gpa);
+ }
var call_info = function.resolveCallingConventionValues(fn_type, &.{}) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
@@ -905,11 +920,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const air_tags = self.air.instructions.items(.tag);
for (body) |inst| {
- const old_air_bookkeeping = self.air_bookkeeping;
- try self.ensureProcessDeathCapacity(Liveness.bpi);
if (builtin.mode == .Debug) {
- try self.mir_to_air_map.put(@intCast(Mir.Inst.Index, self.mir_instructions.len), inst);
+ const mir_inst = @intCast(Mir.Inst.Index, self.mir_instructions.len);
+ try self.mir_to_air_map.put(self.gpa, mir_inst, inst);
}
+
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) continue;
if (debug_wip_mir) @import("../../print_air.zig").dumpInst(
inst,
self.bin_file.options.module.?,
@@ -917,6 +933,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
self.liveness,
);
+ const old_air_bookkeeping = self.air_bookkeeping;
+ try self.inst_tracking.ensureUnusedCapacity(self.gpa, 1);
switch (air_tags[inst]) {
// zig fmt: off
.not,
@@ -1080,7 +1098,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.field_parent_ptr => try self.airFieldParentPtr(inst),
- .switch_br => try self.airSwitch(inst),
+ .switch_br => try self.airSwitchBr(inst),
.slice_ptr => try self.airSlicePtr(inst),
.slice_len => try self.airSliceLen(inst),
@@ -1166,8 +1184,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
var it = self.register_manager.free_registers.iterator(.{ .kind = .unset });
while (it.next()) |index| {
const tracked_inst = self.register_manager.registers[index];
- const tracked_mcv = self.getResolvedInstValue(tracked_inst).?.*;
- assert(RegisterManager.indexOfRegIntoTracked(switch (tracked_mcv) {
+ const tracking = self.getResolvedInstValue(tracked_inst);
+ assert(RegisterManager.indexOfRegIntoTracked(switch (tracking.short) {
.register => |reg| reg,
.register_overflow => |ro| ro.reg,
else => unreachable,
@@ -1205,16 +1223,16 @@ fn freeValue(self: *Self, value: MCValue) void {
}
}
+fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void {
+ if (bt.feed()) if (Air.refToIndex(operand)) |inst| self.processDeath(inst);
+}
+
/// Asserts there is already capacity to insert into top branch inst_table.
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
const air_tags = self.air.instructions.items(.tag);
- if (air_tags[inst] == .constant) return; // Constants are immortal.
- const prev_value = (self.getResolvedInstValue(inst) orelse return).*;
+ if (air_tags[inst] == .constant) return;
log.debug("%{d} => {}", .{ inst, MCValue.dead });
- // When editing this function, note that the logic must synchronize with `reuseOperand`.
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacity(inst, .dead);
- self.freeValue(prev_value);
+ self.inst_tracking.getPtr(inst).?.die(self);
}
/// Called when there are no operands, and the instruction is always unreferenced.
@@ -1224,6 +1242,21 @@ fn finishAirBookkeeping(self: *Self) void {
}
}
+fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void {
+ if (self.liveness.isUnused(inst)) switch (result) {
+ .none, .dead, .unreach => {},
+ else => unreachable, // Why didn't the result die?
+ } else {
+ log.debug("%{d} => {}", .{ inst, result });
+ self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result));
+ // In some cases, an operand may be reused as the result.
+ // If that operand died and was a register, it was freed by
+ // processDeath, so we have to "re-allocate" the register.
+ self.getValue(result, inst);
+ }
+ self.finishAirBookkeeping();
+}
+
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
var tomb_bits = self.liveness.getTombBits(inst);
for (operands) |op| {
@@ -1235,26 +1268,7 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
self.processDeath(op_index);
}
- const is_used = @truncate(u1, tomb_bits) == 0;
- if (is_used) {
- log.debug("%{d} => {}", .{ inst, result });
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacityNoClobber(inst, result);
- // In some cases, an operand may be reused as the result.
- // If that operand died and was a register, it was freed by
- // processDeath, so we have to "re-allocate" the register.
- self.getValue(result, inst);
- } else switch (result) {
- .none, .dead, .unreach => {},
- else => unreachable, // Why didn't the result die?
- }
- self.finishAirBookkeeping();
-}
-
-fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
- // In addition to the caller's needs, we need enough space to spill every register and eflags.
- const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table;
- try table.ensureUnusedCapacity(self.gpa, additional_count + self.register_manager.registers.len + 1);
+ self.finishAirResult(inst, result);
}
fn allocMem(self: *Self, inst: ?Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
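For readers tracing finishAir and finishAirResult above: getTombBits packs one instruction's liveness into a few bits, which the loop shifts through one operand at a time. A stand-alone decoder for that layout, assuming Liveness.bpi == 4 (three operand-death bits, then one unused-result bit); illustration only, not the compiler's code:

    const std = @import("std");

    fn operandDies(tomb_bits: u4, op_index: u2) bool {
        return (tomb_bits >> op_index) & 1 != 0;
    }

    fn resultUnused(tomb_bits: u4) bool {
        return (tomb_bits >> 3) & 1 != 0;
    }

    test "tomb bit layout" {
        const bits: u4 = 0b1010; // operand 1 dies, result unused
        try std.testing.expect(!operandDies(bits, 0));
        try std.testing.expect(operandDies(bits, 1));
        try std.testing.expect(!operandDies(bits, 2));
        try std.testing.expect(resultUnused(bits));
    }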
@@ -1339,66 +1353,115 @@ fn allocRegOrMemAdvanced(self: *Self, elem_ty: Type, inst: ?Air.Inst.Index, reg_
}
const State = struct {
- registers: abi.RegisterManager.TrackedRegisters,
- free_registers: abi.RegisterManager.RegisterBitSet,
- eflags_inst: ?Air.Inst.Index,
+ registers: RegisterManager.TrackedRegisters,
+ free_registers: RegisterManager.RegisterBitSet,
+ inst_tracking_len: u32,
+ scope_generation: u32,
};
-fn captureState(self: *Self) State {
- return State{
- .registers = self.register_manager.registers,
- .free_registers = self.register_manager.free_registers,
- .eflags_inst = self.eflags_inst,
- };
+fn initRetroactiveState(self: *Self) State {
+ var state: State = undefined;
+ state.inst_tracking_len = @intCast(u32, self.inst_tracking.count());
+ state.scope_generation = self.scope_generation;
+ return state;
}
-fn revertState(self: *Self, state: State) void {
- self.eflags_inst = state.eflags_inst;
- self.register_manager.free_registers = state.free_registers;
- self.register_manager.registers = state.registers;
+fn saveRetroactiveState(self: *Self, state: *State) !void {
+ try self.spillEflagsIfOccupied();
+ state.registers = self.register_manager.registers;
+ state.free_registers = self.register_manager.free_registers;
}
-pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
- const stack_mcv = try self.allocRegOrMem(inst, false);
- log.debug("spilling %{d} to stack mcv {any}", .{ inst, stack_mcv });
- const reg_mcv = self.getResolvedInstValue(inst).?.*;
- switch (reg_mcv) {
- .register => |other| {
- assert(reg.to64() == other.to64());
- },
- .register_overflow => |ro| {
- assert(reg.to64() == ro.reg.to64());
- },
- else => {},
- }
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacity(inst, stack_mcv);
- try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv, .{});
+fn saveState(self: *Self) !State {
+ var state = self.initRetroactiveState();
+ try self.saveRetroactiveState(&state);
+ return state;
}
-pub fn spillEflagsIfOccupied(self: *Self) !void {
- if (self.eflags_inst) |inst_to_save| {
- const mcv = self.getResolvedInstValue(inst_to_save).?.*;
- const new_mcv = switch (mcv) {
- .register_overflow => try self.allocRegOrMem(inst_to_save, false),
- .eflags => try self.allocRegOrMem(inst_to_save, true),
- else => unreachable,
- };
-
- try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
- log.debug("spilling %{d} to mcv {any}", .{ inst_to_save, new_mcv });
+fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, comptime opts: struct {
+ emit_instructions: bool,
+ update_tracking: bool,
+ resurrect: bool,
+ close_scope: bool,
+}) !void {
+ if (opts.close_scope) {
+ for (self.inst_tracking.values()[state.inst_tracking_len..]) |*tracking| tracking.die(self);
+ self.inst_tracking.shrinkRetainingCapacity(state.inst_tracking_len);
+ }
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacity(inst_to_save, new_mcv);
+ if (opts.resurrect) for (self.inst_tracking.values()[0..state.inst_tracking_len]) |*tracking|
+ tracking.resurrect(state.scope_generation);
+ const air_tags = self.air.instructions.items(.tag);
+ for (deaths) |death| switch (air_tags[death]) {
+ .constant => {},
+ .const_ty => unreachable,
+ else => self.inst_tracking.getPtr(death).?.die(self),
+ };
+ for (0..state.registers.len) |index| {
+ const current_maybe_inst = if (self.register_manager.free_registers.isSet(index))
+ null
+ else
+ self.register_manager.registers[index];
+ const target_maybe_inst = if (state.free_registers.isSet(index))
+ null
+ else
+ state.registers[index];
+ if (std.debug.runtime_safety) if (target_maybe_inst) |target_inst|
+ assert(self.inst_tracking.getIndex(target_inst).? < state.inst_tracking_len);
+ if (current_maybe_inst == target_maybe_inst) continue;
+ const reg = RegisterManager.regAtTrackedIndex(
+ @intCast(RegisterManager.RegisterBitSet.ShiftInt, index),
+ );
+ if (opts.emit_instructions) {
+ if (current_maybe_inst) |current_inst| {
+ try self.inst_tracking.getPtr(current_inst).?.spill(self, current_inst);
+ }
+ if (target_maybe_inst) |target_inst| {
+ try self.inst_tracking.getPtr(target_inst).?.materialize(self, target_inst, reg);
+ }
+ }
+ if (opts.update_tracking) {
+ if (current_maybe_inst) |current_inst| {
+ self.inst_tracking.getPtr(current_inst).?.trackSpill(self);
+ }
+ self.register_manager.freeReg(reg);
+ self.register_manager.getRegAssumeFree(reg, target_maybe_inst);
+ if (target_maybe_inst) |target_inst| {
+ self.inst_tracking.getPtr(target_inst).?.trackMaterialize(self, target_inst, reg);
+ }
+ }
+ }
+ if (opts.emit_instructions) if (self.eflags_inst) |inst|
+ try self.inst_tracking.getPtr(inst).?.spill(self, inst);
+ if (opts.update_tracking) if (self.eflags_inst) |inst| {
self.eflags_inst = null;
+ self.inst_tracking.getPtr(inst).?.trackSpill(self);
+ };
- // TODO consolidate with register manager and spillInstruction
- // this call should really belong in the register manager!
- switch (mcv) {
- .register_overflow => |ro| self.register_manager.freeReg(ro.reg),
- else => {},
- }
+ if (opts.update_tracking and std.debug.runtime_safety) {
+ assert(self.eflags_inst == null);
+ assert(self.register_manager.free_registers.eql(state.free_registers));
+ var used_reg_it = state.free_registers.iterator(.{ .kind = .unset });
+ while (used_reg_it.next()) |index|
+ assert(self.register_manager.registers[index] == state.registers[index]);
+ }
+}
+
+pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
+ const tracking = self.inst_tracking.getPtr(inst).?;
+ assert(tracking.getReg().?.to64() == reg.to64());
+ try tracking.spill(self, inst);
+ tracking.trackSpill(self);
+}
+
+pub fn spillEflagsIfOccupied(self: *Self) !void {
+ if (self.eflags_inst) |inst| {
+ self.eflags_inst = null;
+ const tracking = self.inst_tracking.getPtr(inst).?;
+ assert(tracking.getCondition() != null);
+ try tracking.spill(self, inst);
+ tracking.trackSpill(self);
}
}
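Both helpers above defer to InstTracking.spill, whose policy is worth spelling out: only values whose long-term home is a register or eflags need a fresh stack slot; values already in memory, or rematerializable ones like immediates and addresses, need none. A condensed, hypothetical classifier mirroring those cases (not the backend's real types):

    const std = @import("std");

    const Long = enum { register, register_overflow, eflags, stack_offset, immediate, lea_direct };

    fn needsStackSlot(long: Long) bool {
        return switch (long) {
            // Volatile homes: must be written out before the register is reused.
            .register, .register_overflow, .eflags => true,
            // Already in memory, or cheap to rematerialize on demand.
            .stack_offset, .immediate, .lea_direct => false,
        };
    }

    test "spill policy" {
        try std.testing.expect(needsStackSlot(.eflags));
        try std.testing.expect(!needsStackSlot(.stack_offset));
        try std.testing.expect(!needsStackSlot(.immediate));
    }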
@@ -1442,22 +1505,14 @@ fn copyToRegisterWithInstTracking(self: *Self, reg_owner: Air.Inst.Index, ty: Ty
}
fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
- const stack_offset = try self.allocMemPtr(inst);
- break :result .{ .ptr_stack_offset = @intCast(i32, stack_offset) };
- };
+ const stack_offset = try self.allocMemPtr(inst);
+ const result = MCValue{ .ptr_stack_offset = @intCast(i32, stack_offset) };
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
- const stack_offset = try self.allocMemPtr(inst);
- break :result .{ .ptr_stack_offset = @intCast(i32, stack_offset) };
- };
+ const stack_offset = try self.allocMemPtr(inst);
+ const result = MCValue{ .ptr_stack_offset = @intCast(i32, stack_offset) };
return self.finishAir(inst, result, .{ .none, .none, .none });
}
@@ -1477,126 +1532,125 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_int_info = src_ty.intInfo(self.target.*);
- const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_lock = switch (src_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
-
- const dst_ty = self.air.typeOfIndex(inst);
- const dst_int_info = dst_ty.intInfo(self.target.*);
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
- const dst_mcv = if (dst_abi_size <= src_abi_size and
- self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_mcv
- else
- try self.allocRegOrMem(inst, true);
- const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
- const signedness: std.builtin.Signedness = if (dst_int_info.signedness == .signed and
- src_int_info.signedness == .signed) .signed else .unsigned;
- switch (dst_mcv) {
- .register => |dst_reg| {
- const min_abi_size = @min(dst_abi_size, src_abi_size);
- const tag: Mir.Inst.Tag = switch (signedness) {
- .signed => .movsx,
- .unsigned => if (min_abi_size > 2) .mov else .movzx,
- };
- const dst_alias = switch (tag) {
- .movsx => dst_reg.to64(),
- .mov, .movzx => if (min_abi_size > 4) dst_reg.to64() else dst_reg.to32(),
- else => unreachable,
- };
- switch (src_mcv) {
- .register => |src_reg| {
- try self.asmRegisterRegister(
- tag,
- dst_alias,
- registerAlias(src_reg, min_abi_size),
- );
- },
- .stack_offset => |src_off| {
- try self.asmRegisterMemory(tag, dst_alias, Memory.sib(
- Memory.PtrSize.fromSize(min_abi_size),
- .{ .base = .rbp, .disp = -src_off },
- ));
- },
- else => return self.fail("TODO airIntCast from {s} to {s}", .{
- @tagName(src_mcv),
- @tagName(dst_mcv),
- }),
- }
- if (self.regExtraBits(min_ty) > 0) try self.truncateRegister(min_ty, dst_reg);
- },
- else => {
- try self.setRegOrMem(min_ty, dst_mcv, src_mcv);
- const extra = dst_abi_size * 8 - dst_int_info.bits;
- if (extra > 0) {
- try self.genShiftBinOpMir(switch (signedness) {
- .signed => .sal,
- .unsigned => .shl,
- }, dst_ty, dst_mcv, .{ .immediate = extra });
- try self.genShiftBinOpMir(switch (signedness) {
- .signed => .sar,
- .unsigned => .shr,
- }, dst_ty, dst_mcv, .{ .immediate = extra });
- }
- },
- }
- break :result dst_mcv;
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_int_info = src_ty.intInfo(self.target.*);
+ const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_lock = switch (src_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
};
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const dst_ty = self.air.typeOfIndex(inst);
+ const dst_int_info = dst_ty.intInfo(self.target.*);
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ const dst_mcv = if (dst_abi_size <= src_abi_size and
+ self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_mcv
+ else
+ try self.allocRegOrMem(inst, true);
+
+ const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
+ const signedness: std.builtin.Signedness = if (dst_int_info.signedness == .signed and
+ src_int_info.signedness == .signed) .signed else .unsigned;
+ switch (dst_mcv) {
+ .register => |dst_reg| {
+ const min_abi_size = @min(dst_abi_size, src_abi_size);
+ const tag: Mir.Inst.Tag = switch (signedness) {
+ .signed => .movsx,
+ .unsigned => if (min_abi_size > 2) .mov else .movzx,
+ };
+ const dst_alias = switch (tag) {
+ .movsx => dst_reg.to64(),
+ .mov, .movzx => if (min_abi_size > 4) dst_reg.to64() else dst_reg.to32(),
+ else => unreachable,
+ };
+ switch (src_mcv) {
+ .register => |src_reg| {
+ try self.asmRegisterRegister(
+ tag,
+ dst_alias,
+ registerAlias(src_reg, min_abi_size),
+ );
+ },
+ .stack_offset => |src_off| {
+ try self.asmRegisterMemory(tag, dst_alias, Memory.sib(
+ Memory.PtrSize.fromSize(min_abi_size),
+ .{ .base = .rbp, .disp = -src_off },
+ ));
+ },
+ else => return self.fail("TODO airIntCast from {s} to {s}", .{
+ @tagName(src_mcv),
+ @tagName(dst_mcv),
+ }),
+ }
+ if (self.regExtraBits(min_ty) > 0) try self.truncateRegister(min_ty, dst_reg);
+ },
+ else => {
+ try self.setRegOrMem(min_ty, dst_mcv, src_mcv);
+ const extra = dst_abi_size * 8 - dst_int_info.bits;
+ if (extra > 0) {
+ try self.genShiftBinOpMir(switch (signedness) {
+ .signed => .sal,
+ .unsigned => .shl,
+ }, dst_ty, dst_mcv, .{ .immediate = extra });
+ try self.genShiftBinOpMir(switch (signedness) {
+ .signed => .sar,
+ .unsigned => .shr,
+ }, dst_ty, dst_mcv, .{ .immediate = extra });
+ }
+ },
+ }
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const dst_ty = self.air.typeOfIndex(inst);
- const dst_abi_size = dst_ty.abiSize(self.target.*);
- if (dst_abi_size > 8) {
- return self.fail("TODO implement trunc for abi sizes larger than 8", .{});
- }
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_lock = switch (src_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
-
- const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const dst_abi_size = dst_ty.abiSize(self.target.*);
+ if (dst_abi_size > 8) {
+ return self.fail("TODO implement trunc for abi sizes larger than 8", .{});
+ }
- // when truncating a `u16` to `u5`, for example, those top 3 bits in the result
- // have to be removed. this only happens if the dst if not a power-of-two size.
- if (self.regExtraBits(dst_ty) > 0) try self.truncateRegister(dst_ty, dst_mcv.register.to64());
- break :result dst_mcv;
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_lock = switch (src_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
};
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
+
+ // when truncating a `u16` to `u5`, for example, those top 3 bits in the result
+ // have to be removed. this only happens if the dst is not a power-of-two size.
+ if (self.regExtraBits(dst_ty) > 0) try self.truncateRegister(dst_ty, dst_mcv.register.to64());
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const ty = self.air.typeOfIndex(inst);
+
const operand = try self.resolveInst(un_op);
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand;
- return self.finishAir(inst, result, .{ un_op, .none, .none });
+ const dst_mcv = if (self.reuseOperand(inst, un_op, 0, operand))
+ operand
+ else
+ try self.copyToRegisterWithInstTracking(inst, ty, operand);
+
+ return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
}
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
- }
-
const ptr = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
const len = try self.resolveInst(bin_op.rhs);
@@ -1612,33 +1666,21 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
fn airUnOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-
- const result = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.genUnOp(inst, tag, ty_op.operand);
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const dst_mcv = try self.genUnOp(inst, tag, ty_op.operand);
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-
- const result = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-
- const result = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
@@ -1678,7 +1720,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result = result: {
const tag = self.air.instructions.items(.tag)[inst];
const dst_ty = self.air.typeOfIndex(inst);
if (dst_ty.zigTypeTag() == .Float)
@@ -1709,168 +1751,162 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ty = self.air.typeOf(bin_op.lhs);
+ const ty = self.air.typeOf(bin_op.lhs);
- const lhs_mcv = try self.resolveInst(bin_op.lhs);
- const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
- lhs_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
- const dst_reg = dst_mcv.register;
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ const lhs_mcv = try self.resolveInst(bin_op.lhs);
+ const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
+ lhs_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
+ const dst_reg = dst_mcv.register;
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
- const rhs_mcv = try self.resolveInst(bin_op.rhs);
- const rhs_lock = switch (rhs_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
-
- const limit_reg = try self.register_manager.allocReg(null, gp);
- const limit_mcv = MCValue{ .register = limit_reg };
- const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
- defer self.register_manager.unlockReg(limit_lock);
-
- const reg_bits = self.regBitSize(ty);
- const cc: Condition = if (ty.isSignedInt()) cc: {
- try self.genSetReg(ty, limit_reg, dst_mcv);
- try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
- try self.genBinOpMir(.xor, ty, limit_mcv, .{
- .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
- });
- break :cc .o;
- } else cc: {
- try self.genSetReg(ty, limit_reg, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
- });
- break :cc .c;
- };
- try self.genBinOpMir(.add, ty, dst_mcv, rhs_mcv);
+ const rhs_mcv = try self.resolveInst(bin_op.rhs);
+ const rhs_lock = switch (rhs_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
- try self.asmCmovccRegisterRegister(
- registerAlias(dst_reg, cmov_abi_size),
- registerAlias(limit_reg, cmov_abi_size),
- cc,
- );
- break :result dst_mcv;
+ const limit_reg = try self.register_manager.allocReg(null, gp);
+ const limit_mcv = MCValue{ .register = limit_reg };
+ const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
+ defer self.register_manager.unlockReg(limit_lock);
+
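+ // Branchless saturation sketch: build the clamp value in limit_reg, do the
+ // plain add, then cmov the clamp in when the add overflows. For signed types,
+ // limit = (lhs >> (bits - 1)) ^ maxInt(iN) is maxInt when lhs >= 0 and minInt
+ // when lhs < 0, i.e. the bound in the direction of overflow (overflow flag).
+ // For unsigned types the bound is maxInt, selected on carry.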
+ const reg_bits = self.regBitSize(ty);
+ const cc: Condition = if (ty.isSignedInt()) cc: {
+ try self.genSetReg(ty, limit_reg, dst_mcv);
+ try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
+ try self.genBinOpMir(.xor, ty, limit_mcv, .{
+ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+ });
+ break :cc .o;
+ } else cc: {
+ try self.genSetReg(ty, limit_reg, .{
+ .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
+ });
+ break :cc .c;
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ try self.genBinOpMir(.add, ty, dst_mcv, rhs_mcv);
+
+ const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+ try self.asmCmovccRegisterRegister(
+ registerAlias(dst_reg, cmov_abi_size),
+ registerAlias(limit_reg, cmov_abi_size),
+ cc,
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ty = self.air.typeOf(bin_op.lhs);
+ const ty = self.air.typeOf(bin_op.lhs);
- const lhs_mcv = try self.resolveInst(bin_op.lhs);
- const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
- lhs_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
- const dst_reg = dst_mcv.register;
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
-
- const rhs_mcv = try self.resolveInst(bin_op.rhs);
- const rhs_lock = switch (rhs_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
-
- const limit_reg = try self.register_manager.allocReg(null, gp);
- const limit_mcv = MCValue{ .register = limit_reg };
- const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
- defer self.register_manager.unlockReg(limit_lock);
-
- const reg_bits = self.regBitSize(ty);
- const cc: Condition = if (ty.isSignedInt()) cc: {
- try self.genSetReg(ty, limit_reg, dst_mcv);
- try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
- try self.genBinOpMir(.xor, ty, limit_mcv, .{
- .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
- });
- break :cc .o;
- } else cc: {
- try self.genSetReg(ty, limit_reg, .{ .immediate = 0 });
- break :cc .c;
- };
- try self.genBinOpMir(.sub, ty, dst_mcv, rhs_mcv);
+ const lhs_mcv = try self.resolveInst(bin_op.lhs);
+ const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
+ lhs_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
+ const dst_reg = dst_mcv.register;
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
- try self.asmCmovccRegisterRegister(
- registerAlias(dst_reg, cmov_abi_size),
- registerAlias(limit_reg, cmov_abi_size),
- cc,
- );
- break :result dst_mcv;
+ const rhs_mcv = try self.resolveInst(bin_op.rhs);
+ const rhs_lock = switch (rhs_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const limit_reg = try self.register_manager.allocReg(null, gp);
+ const limit_mcv = MCValue{ .register = limit_reg };
+ const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
+ defer self.register_manager.unlockReg(limit_lock);
+
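+ // Same branchless pattern as airAddSat: signed overflow clamps to
+ // (lhs >> (bits - 1)) ^ maxInt(iN), i.e. maxInt or minInt depending on the
+ // sign of lhs, while unsigned subtraction clamps to 0 on borrow (carry flag).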
+ const reg_bits = self.regBitSize(ty);
+ const cc: Condition = if (ty.isSignedInt()) cc: {
+ try self.genSetReg(ty, limit_reg, dst_mcv);
+ try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
+ try self.genBinOpMir(.xor, ty, limit_mcv, .{
+ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+ });
+ break :cc .o;
+ } else cc: {
+ try self.genSetReg(ty, limit_reg, .{ .immediate = 0 });
+ break :cc .c;
+ };
+ try self.genBinOpMir(.sub, ty, dst_mcv, rhs_mcv);
+
+ const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+ try self.asmCmovccRegisterRegister(
+ registerAlias(dst_reg, cmov_abi_size),
+ registerAlias(limit_reg, cmov_abi_size),
+ cc,
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ty = self.air.typeOf(bin_op.lhs);
+ const ty = self.air.typeOf(bin_op.lhs);
- try self.spillRegisters(&.{ .rax, .rdx });
- const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
- defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
+ try self.spillRegisters(&.{ .rax, .rdx });
+ const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
+ defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
- const lhs_mcv = try self.resolveInst(bin_op.lhs);
- const lhs_lock = switch (lhs_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+ const lhs_mcv = try self.resolveInst(bin_op.lhs);
+ const lhs_lock = switch (lhs_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
- const rhs_mcv = try self.resolveInst(bin_op.rhs);
- const rhs_lock = switch (rhs_mcv) {
- .register => |reg| self.register_manager.lockReg(reg),
- else => null,
- };
- defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
-
- const limit_reg = try self.register_manager.allocReg(null, gp);
- const limit_mcv = MCValue{ .register = limit_reg };
- const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
- defer self.register_manager.unlockReg(limit_lock);
-
- const reg_bits = self.regBitSize(ty);
- const cc: Condition = if (ty.isSignedInt()) cc: {
- try self.genSetReg(ty, limit_reg, lhs_mcv);
- try self.genBinOpMir(.xor, ty, limit_mcv, rhs_mcv);
- try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
- try self.genBinOpMir(.xor, ty, limit_mcv, .{
- .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
- });
- break :cc .o;
- } else cc: {
- try self.genSetReg(ty, limit_reg, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
- });
- break :cc .c;
- };
+ const rhs_mcv = try self.resolveInst(bin_op.rhs);
+ const rhs_lock = switch (rhs_mcv) {
+ .register => |reg| self.register_manager.lockReg(reg),
+ else => null,
+ };
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
- try self.asmCmovccRegisterRegister(
- registerAlias(dst_mcv.register, cmov_abi_size),
- registerAlias(limit_reg, cmov_abi_size),
- cc,
- );
- break :result dst_mcv;
+ const limit_reg = try self.register_manager.allocReg(null, gp);
+ const limit_mcv = MCValue{ .register = limit_reg };
+ const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
+ defer self.register_manager.unlockReg(limit_lock);
+
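+ // For signed saturating multiply the clamp direction depends on whether the
+ // operand signs differ: ((lhs ^ rhs) >> (bits - 1)) ^ maxInt(iN) is maxInt
+ // when the signs match and minInt when they differ. Unsigned clamps to
+ // maxInt on carry, as in airAddSat.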
+ const reg_bits = self.regBitSize(ty);
+ const cc: Condition = if (ty.isSignedInt()) cc: {
+ try self.genSetReg(ty, limit_reg, lhs_mcv);
+ try self.genBinOpMir(.xor, ty, limit_mcv, rhs_mcv);
+ try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
+ try self.genBinOpMir(.xor, ty, limit_mcv, .{
+ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+ });
+ break :cc .o;
+ } else cc: {
+ try self.genSetReg(ty, limit_reg, .{
+ .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
+ });
+ break :cc .c;
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+
+ const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
+ const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+ try self.asmCmovccRegisterRegister(
+ registerAlias(dst_mcv.register, cmov_abi_size),
+ registerAlias(limit_reg, cmov_abi_size),
+ cc,
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result: MCValue = result: {
const tag = self.air.instructions.items(.tag)[inst];
const ty = self.air.typeOf(bin_op.lhs);
switch (ty.zigTypeTag()) {
@@ -1929,7 +1965,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result: MCValue = result: {
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
switch (lhs_ty.zigTypeTag()) {
@@ -2051,7 +2087,7 @@ fn genSetStackTruncatedOverflowCompare(
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result: MCValue = result: {
const dst_ty = self.air.typeOf(bin_op.lhs);
switch (dst_ty.zigTypeTag()) {
.Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}),
@@ -2240,10 +2276,6 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
- }
-
try self.spillRegisters(&.{.rcx});
const tag = self.air.instructions.items(.tag)[inst];
@@ -2260,18 +2292,14 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ _ = bin_op;
+ return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .none;
-
const pl_ty = self.air.typeOfIndex(inst);
const opt_mcv = try self.resolveInst(ty_op.operand);
@@ -2296,18 +2324,15 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const dst_ty = self.air.typeOfIndex(inst);
- const opt_mcv = try self.resolveInst(ty_op.operand);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const opt_mcv = try self.resolveInst(ty_op.operand);
- break :result if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
- opt_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
+ opt_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
@@ -2320,7 +2345,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
if (opt_ty.optionalReprIsPayload()) {
break :result if (self.liveness.isUnused(inst))
- .dead
+ .unreach
else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv
else
@@ -2339,16 +2364,13 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
Memory.sib(.byte, .{ .base = dst_mcv.register, .disp = pl_abi_size }),
Immediate.u(1),
);
- break :result if (self.liveness.isUnused(inst)) .dead else dst_mcv;
+ break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
const err_union_ty = self.air.typeOf(ty_op.operand);
const err_ty = err_union_ty.errorUnionSet();
const payload_ty = err_union_ty.errorUnionPayload();
@@ -2391,9 +2413,6 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
const err_union_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
const result = try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, operand);
@@ -2444,72 +2463,68 @@ fn genUnwrapErrorUnionPayloadMir(
// *(E!T) -> E
fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_reg = switch (src_mcv) {
- .register => |reg| reg,
- else => try self.copyToTmpRegister(src_ty, src_mcv),
- };
- const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
- defer self.register_manager.unlockReg(src_lock);
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_reg = switch (src_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(src_ty, src_mcv),
+ };
+ const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
+ defer self.register_manager.unlockReg(src_lock);
- const dst_reg = try self.register_manager.allocReg(inst, gp);
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ const dst_reg = try self.register_manager.allocReg(inst, gp);
+ const dst_mcv = MCValue{ .register = dst_reg };
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
- const eu_ty = src_ty.childType();
- const pl_ty = eu_ty.errorUnionPayload();
- const err_ty = eu_ty.errorUnionSet();
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
- const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
- try self.asmRegisterMemory(
- .mov,
- registerAlias(dst_reg, err_abi_size),
- Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ .base = src_reg, .disp = err_off }),
- );
- break :result .{ .register = dst_reg };
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const eu_ty = src_ty.childType();
+ const pl_ty = eu_ty.errorUnionPayload();
+ const err_ty = eu_ty.errorUnionSet();
+ const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
+ const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
+ try self.asmRegisterMemory(
+ .mov,
+ registerAlias(dst_reg, err_abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ .base = src_reg, .disp = err_off }),
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
// *(E!T) -> *T
fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_reg = switch (src_mcv) {
- .register => |reg| reg,
- else => try self.copyToTmpRegister(src_ty, src_mcv),
- };
- const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
- defer self.register_manager.unlockReg(src_lock);
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_reg = switch (src_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(src_ty, src_mcv),
+ };
+ const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
+ defer self.register_manager.unlockReg(src_lock);
- const dst_ty = self.air.typeOfIndex(inst);
- const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_reg
- else
- try self.register_manager.allocReg(inst, gp);
- const dst_lock = self.register_manager.lockReg(dst_reg);
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_reg
+ else
+ try self.register_manager.allocReg(inst, gp);
+ const dst_mcv = MCValue{ .register = dst_reg };
+ const dst_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const eu_ty = src_ty.childType();
- const pl_ty = eu_ty.errorUnionPayload();
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
- try self.asmRegisterMemory(
- .lea,
- registerAlias(dst_reg, dst_abi_size),
- Memory.sib(.qword, .{ .base = src_reg, .disp = pl_off }),
- );
- break :result .{ .register = dst_reg };
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const eu_ty = src_ty.childType();
+ const pl_ty = eu_ty.errorUnionPayload();
+ const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ try self.asmRegisterMemory(
+ .lea,
+ registerAlias(dst_reg, dst_abi_size),
+ Memory.sib(.qword, .{ .base = src_reg, .disp = pl_off }),
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
@@ -2535,7 +2550,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
Immediate.u(0),
);
- if (self.liveness.isUnused(inst)) break :result .dead;
+ if (self.liveness.isUnused(inst)) break :result .unreach;
const dst_ty = self.air.typeOfIndex(inst);
const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
@@ -2558,11 +2573,9 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
}
fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement airErrReturnTrace for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ .none, .none, .none });
+ _ = inst;
+ return self.fail("TODO implement airErrReturnTrace for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
@@ -2578,8 +2591,6 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
const pl_ty = self.air.typeOf(ty_op.operand);
if (!pl_ty.hasRuntimeBits()) break :result .{ .immediate = 1 };
@@ -2624,10 +2635,6 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
-
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
@@ -2654,9 +2661,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
+
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
@@ -2682,7 +2687,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result = result: {
const src_mcv = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
@@ -2696,72 +2701,65 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(ty_op.operand);
- const dst_mcv: MCValue = blk: {
- switch (operand) {
- .stack_offset => |off| {
- break :blk MCValue{ .stack_offset = off - 8 };
- },
- else => return self.fail("TODO implement slice_len for {}", .{operand}),
- }
- };
- break :result dst_mcv;
+
+ const operand = try self.resolveInst(ty_op.operand);
+ const dst_mcv: MCValue = blk: {
+ switch (operand) {
+ .stack_offset => |off| {
+ break :blk MCValue{ .stack_offset = off - 8 };
+ },
+ else => return self.fail("TODO implement slice_len for {}", .{operand}),
+ }
};
- log.debug("airSliceLen(%{d}): {}", .{ inst, result });
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_reg = switch (src_mcv) {
- .register => |reg| reg,
- else => try self.copyToTmpRegister(src_ty, src_mcv),
- };
- const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
- defer self.register_manager.unlockReg(src_lock);
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_reg = switch (src_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(src_ty, src_mcv),
+ };
+ const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
+ defer self.register_manager.unlockReg(src_lock);
- const dst_ty = self.air.typeOfIndex(inst);
- const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_reg
- else
- try self.register_manager.allocReg(inst, gp);
- const dst_lock = self.register_manager.lockReg(dst_reg);
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_reg
+ else
+ try self.register_manager.allocReg(inst, gp);
+ const dst_mcv = MCValue{ .register = dst_reg };
+ const dst_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
- try self.asmRegisterMemory(
- .lea,
- registerAlias(dst_reg, dst_abi_size),
- Memory.sib(.qword, .{
- .base = src_reg,
- .disp = @divExact(self.target.cpu.arch.ptrBitWidth(), 8),
- }),
- );
- break :result .{ .register = dst_reg };
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ try self.asmRegisterMemory(
+ .lea,
+ registerAlias(dst_reg, dst_abi_size),
+ Memory.sib(.qword, .{
+ .base = src_reg,
+ .disp = @divExact(self.target.cpu.arch.ptrBitWidth(), 8),
+ }),
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const dst_ty = self.air.typeOfIndex(inst);
- const opt_mcv = try self.resolveInst(ty_op.operand);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const opt_mcv = try self.resolveInst(ty_op.operand);
- break :result if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
- opt_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
+ opt_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register {
@@ -2829,34 +2827,26 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
- const result = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
- const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
- const dst_mcv = try self.allocRegOrMem(inst, false);
- try self.load(dst_mcv, elem_ptr, slice_ptr_field_type);
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
+ const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
+ const dst_mcv = try self.allocRegOrMem(inst, false);
+ try self.load(dst_mcv, elem_ptr, slice_ptr_field_type);
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.genSliceElemPtr(extra.lhs, extra.rhs);
- return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
+ const dst_mcv = try self.genSliceElemPtr(extra.lhs, extra.rhs);
+ return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
}
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
- }
-
const array_ty = self.air.typeOf(bin_op.lhs);
const array = try self.resolveInst(bin_op.lhs);
const array_lock: ?RegisterLock = switch (array) {
@@ -2923,77 +2913,74 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
- const result = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
- // this is identical to the `airPtrElemPtr` codegen expect here an
- // additional `mov` is needed at the end to get the actual value
-
- const elem_ty = ptr_ty.elemType2();
- const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
- const index_ty = self.air.typeOf(bin_op.rhs);
- const index_mcv = try self.resolveInst(bin_op.rhs);
- const index_lock = switch (index_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
- const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size);
- const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
- defer self.register_manager.unlockReg(offset_lock);
+ // this is identical to the `airPtrElemPtr` codegen except that here an
+ // additional `mov` is needed at the end to get the actual value
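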
- const ptr_mcv = try self.resolveInst(bin_op.lhs);
- const elem_ptr_reg = if (ptr_mcv.isRegister() and self.liveness.operandDies(inst, 0))
- ptr_mcv.register
- else
- try self.copyToTmpRegister(ptr_ty, ptr_mcv);
- const elem_ptr_lock = self.register_manager.lockRegAssumeUnused(elem_ptr_reg);
- defer self.register_manager.unlockReg(elem_ptr_lock);
- try self.asmRegisterRegister(.add, elem_ptr_reg, offset_reg);
+ const elem_ty = ptr_ty.elemType2();
+ const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const index_ty = self.air.typeOf(bin_op.rhs);
+ const index_mcv = try self.resolveInst(bin_op.rhs);
+ const index_lock = switch (index_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv = try self.allocRegOrMem(inst, true);
- const dst_lock = switch (dst_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- try self.load(dst_mcv, .{ .register = elem_ptr_reg }, ptr_ty);
- break :result dst_mcv;
+ const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size);
+ const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+ defer self.register_manager.unlockReg(offset_lock);
+
+ const ptr_mcv = try self.resolveInst(bin_op.lhs);
+ const elem_ptr_reg = if (ptr_mcv.isRegister() and self.liveness.operandDies(inst, 0))
+ ptr_mcv.register
+ else
+ try self.copyToTmpRegister(ptr_ty, ptr_mcv);
+ const elem_ptr_lock = self.register_manager.lockRegAssumeUnused(elem_ptr_reg);
+ defer self.register_manager.unlockReg(elem_ptr_lock);
+ try self.asmRegisterRegister(.add, elem_ptr_reg, offset_reg);
+
+ const dst_mcv = try self.allocRegOrMem(inst, true);
+ const dst_lock = switch (dst_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ try self.load(dst_mcv, .{ .register = elem_ptr_reg }, ptr_ty);
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const ptr_ty = self.air.typeOf(extra.lhs);
- const ptr = try self.resolveInst(extra.lhs);
- const ptr_lock: ?RegisterLock = switch (ptr) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
+ const ptr_ty = self.air.typeOf(extra.lhs);
+ const ptr = try self.resolveInst(extra.lhs);
+ const ptr_lock: ?RegisterLock = switch (ptr) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_ty = ptr_ty.elemType2();
- const elem_abi_size = elem_ty.abiSize(self.target.*);
- const index_ty = self.air.typeOf(extra.rhs);
- const index = try self.resolveInst(extra.rhs);
- const index_lock: ?RegisterLock = switch (index) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
+ const elem_ty = ptr_ty.elemType2();
+ const elem_abi_size = elem_ty.abiSize(self.target.*);
+ const index_ty = self.air.typeOf(extra.rhs);
+ const index = try self.resolveInst(extra.rhs);
+ const index_lock: ?RegisterLock = switch (index) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
- const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
- const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
- defer self.register_manager.unlockReg(offset_reg_lock);
+ const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
+ const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+ defer self.register_manager.unlockReg(offset_reg_lock);
- const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
- try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
+ const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
+ try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
+
+ return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
}
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
@@ -3035,9 +3022,6 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
const tag_ty = self.air.typeOfIndex(inst);
const union_ty = self.air.typeOf(ty_op.operand);
@@ -3089,8 +3073,6 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
const dst_ty = self.air.typeOfIndex(inst);
const src_ty = self.air.typeOf(ty_op.operand);
@@ -3158,8 +3140,6 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
const dst_ty = self.air.typeOfIndex(inst);
const src_ty = self.air.typeOf(ty_op.operand);
const src_bits = src_ty.bitSize(self.target.*);
@@ -3216,8 +3196,6 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
const src_ty = self.air.typeOf(ty_op.operand);
const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -3386,148 +3364,138 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true);
- switch (self.regExtraBits(src_ty)) {
- 0 => {},
- else => |extra| try self.genBinOpMir(
- if (src_ty.isSignedInt()) .sar else .shr,
- src_ty,
- dst_mcv,
- .{ .immediate = extra },
- ),
- }
- break :result dst_mcv;
- };
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_mcv = try self.resolveInst(ty_op.operand);
+
+ const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true);
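+ // When the integer does not fill the register, the byte swap leaves the
+ // value shifted up by the unused bits; shift them back out (arithmetically
+ // for signed types, preserving the sign).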
+ switch (self.regExtraBits(src_ty)) {
+ 0 => {},
+ else => |extra| try self.genBinOpMir(
+ if (src_ty.isSignedInt()) .sar else .shr,
+ src_ty,
+ dst_mcv,
+ .{ .immediate = extra },
+ ),
+ }
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
- const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+ const src_mcv = try self.resolveInst(ty_op.operand);
- const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
- const dst_reg = dst_mcv.register;
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
+ const dst_reg = dst_mcv.register;
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
- const tmp_reg = try self.register_manager.allocReg(null, gp);
- const tmp_lock = self.register_manager.lockReg(tmp_reg);
- defer if (tmp_lock) |lock| self.register_manager.unlockReg(lock);
+ const tmp_reg = try self.register_manager.allocReg(null, gp);
+ const tmp_lock = self.register_manager.lockReg(tmp_reg);
+ defer if (tmp_lock) |lock| self.register_manager.unlockReg(lock);
- {
- const dst = registerAlias(dst_reg, src_abi_size);
- const tmp = registerAlias(tmp_reg, src_abi_size);
- const imm = if (src_abi_size > 4)
- try self.register_manager.allocReg(null, gp)
- else
- undefined;
+ {
+ const dst = registerAlias(dst_reg, src_abi_size);
+ const tmp = registerAlias(tmp_reg, src_abi_size);
+ const imm = if (src_abi_size > 4)
+ try self.register_manager.allocReg(null, gp)
+ else
+ undefined;
- const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8);
- const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
- const imm_00_11 = Immediate.u(mask / 0b01_01);
- const imm_0_1 = Immediate.u(mask / 0b1_1);
+ const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8);
+ const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
+ const imm_00_11 = Immediate.u(mask / 0b01_01);
+ const imm_0_1 = Immediate.u(mask / 0b1_1);
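+ // Dividing the all-ones mask by a small repeating pattern produces the
+ // classic bit-reversal masks; e.g. for a 4-byte operand,
+ // 0xFFFFFFFF / 0x11 = 0x0F0F0F0F, / 0b0101 = 0x33333333, / 0b11 = 0x55555555.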
- // dst = temp1 = bswap(operand)
- try self.asmRegisterRegister(.mov, tmp, dst);
- // tmp = temp1
- try self.asmRegisterImmediate(.shr, dst, Immediate.u(4));
- // dst = temp1 >> 4
- if (src_abi_size > 4) {
- try self.asmRegisterImmediate(.mov, imm, imm_0000_1111);
- try self.asmRegisterRegister(.@"and", tmp, imm);
- try self.asmRegisterRegister(.@"and", dst, imm);
- } else {
- try self.asmRegisterImmediate(.@"and", tmp, imm_0000_1111);
- try self.asmRegisterImmediate(.@"and", dst, imm_0000_1111);
- }
- // tmp = temp1 & 0x0F...0F
- // dst = (temp1 >> 4) & 0x0F...0F
- try self.asmRegisterImmediate(.shl, tmp, Immediate.u(4));
- // tmp = (temp1 & 0x0F...0F) << 4
- try self.asmRegisterRegister(.@"or", dst, tmp);
- // dst = temp2 = ((temp1 >> 4) & 0x0F...0F) | ((temp1 & 0x0F...0F) << 4)
- try self.asmRegisterRegister(.mov, tmp, dst);
- // tmp = temp2
- try self.asmRegisterImmediate(.shr, dst, Immediate.u(2));
- // dst = temp2 >> 2
- if (src_abi_size > 4) {
- try self.asmRegisterImmediate(.mov, imm, imm_00_11);
- try self.asmRegisterRegister(.@"and", tmp, imm);
- try self.asmRegisterRegister(.@"and", dst, imm);
- } else {
- try self.asmRegisterImmediate(.@"and", tmp, imm_00_11);
- try self.asmRegisterImmediate(.@"and", dst, imm_00_11);
- }
- // tmp = temp2 & 0x33...33
- // dst = (temp2 >> 2) & 0x33...33
- try self.asmRegisterMemory(
- .lea,
- if (src_abi_size > 4) tmp.to64() else tmp.to32(),
- Memory.sib(.qword, .{
- .base = dst.to64(),
- .scale_index = .{ .index = tmp.to64(), .scale = 1 << 2 },
- }),
- );
- // tmp = temp3 = ((temp2 >> 2) & 0x33...33) + ((temp2 & 0x33...33) << 2)
- try self.asmRegisterRegister(.mov, dst, tmp);
- // dst = temp3
- try self.asmRegisterImmediate(.shr, tmp, Immediate.u(1));
- // tmp = temp3 >> 1
- if (src_abi_size > 4) {
- try self.asmRegisterImmediate(.mov, imm, imm_0_1);
- try self.asmRegisterRegister(.@"and", dst, imm);
- try self.asmRegisterRegister(.@"and", tmp, imm);
- } else {
- try self.asmRegisterImmediate(.@"and", dst, imm_0_1);
- try self.asmRegisterImmediate(.@"and", tmp, imm_0_1);
- }
- // dst = temp3 & 0x55...55
- // tmp = (temp3 >> 1) & 0x55...55
- try self.asmRegisterMemory(
- .lea,
- if (src_abi_size > 4) dst.to64() else dst.to32(),
- Memory.sib(.qword, .{
- .base = tmp.to64(),
- .scale_index = .{ .index = dst.to64(), .scale = 1 << 1 },
- }),
- );
- // dst = ((temp3 >> 1) & 0x55...55) + ((temp3 & 0x55...55) << 1)
+ // dst = temp1 = bswap(operand)
+ try self.asmRegisterRegister(.mov, tmp, dst);
+ // tmp = temp1
+ try self.asmRegisterImmediate(.shr, dst, Immediate.u(4));
+ // dst = temp1 >> 4
+ if (src_abi_size > 4) {
+ try self.asmRegisterImmediate(.mov, imm, imm_0000_1111);
+ try self.asmRegisterRegister(.@"and", tmp, imm);
+ try self.asmRegisterRegister(.@"and", dst, imm);
+ } else {
+ try self.asmRegisterImmediate(.@"and", tmp, imm_0000_1111);
+ try self.asmRegisterImmediate(.@"and", dst, imm_0000_1111);
}
-
- switch (self.regExtraBits(src_ty)) {
- 0 => {},
- else => |extra| try self.genBinOpMir(
- if (src_ty.isSignedInt()) .sar else .shr,
- src_ty,
- dst_mcv,
- .{ .immediate = extra },
- ),
+ // tmp = temp1 & 0x0F...0F
+ // dst = (temp1 >> 4) & 0x0F...0F
+ try self.asmRegisterImmediate(.shl, tmp, Immediate.u(4));
+ // tmp = (temp1 & 0x0F...0F) << 4
+ try self.asmRegisterRegister(.@"or", dst, tmp);
+ // dst = temp2 = ((temp1 >> 4) & 0x0F...0F) | ((temp1 & 0x0F...0F) << 4)
+ try self.asmRegisterRegister(.mov, tmp, dst);
+ // tmp = temp2
+ try self.asmRegisterImmediate(.shr, dst, Immediate.u(2));
+ // dst = temp2 >> 2
+ if (src_abi_size > 4) {
+ try self.asmRegisterImmediate(.mov, imm, imm_00_11);
+ try self.asmRegisterRegister(.@"and", tmp, imm);
+ try self.asmRegisterRegister(.@"and", dst, imm);
+ } else {
+ try self.asmRegisterImmediate(.@"and", tmp, imm_00_11);
+ try self.asmRegisterImmediate(.@"and", dst, imm_00_11);
}
- break :result dst_mcv;
- };
+ // tmp = temp2 & 0x33...33
+ // dst = (temp2 >> 2) & 0x33...33
+ try self.asmRegisterMemory(
+ .lea,
+ if (src_abi_size > 4) tmp.to64() else tmp.to32(),
+ Memory.sib(.qword, .{
+ .base = dst.to64(),
+ .scale_index = .{ .index = tmp.to64(), .scale = 1 << 2 },
+ }),
+ );
+ // tmp = temp3 = ((temp2 >> 2) & 0x33...33) + ((temp2 & 0x33...33) << 2)
+ try self.asmRegisterRegister(.mov, dst, tmp);
+ // dst = temp3
+ try self.asmRegisterImmediate(.shr, tmp, Immediate.u(1));
+ // tmp = temp3 >> 1
+ if (src_abi_size > 4) {
+ try self.asmRegisterImmediate(.mov, imm, imm_0_1);
+ try self.asmRegisterRegister(.@"and", dst, imm);
+ try self.asmRegisterRegister(.@"and", tmp, imm);
+ } else {
+ try self.asmRegisterImmediate(.@"and", dst, imm_0_1);
+ try self.asmRegisterImmediate(.@"and", tmp, imm_0_1);
+ }
+ // dst = temp3 & 0x55...55
+ // tmp = (temp3 >> 1) & 0x55...55
+ try self.asmRegisterMemory(
+ .lea,
+ if (src_abi_size > 4) dst.to64() else dst.to32(),
+ Memory.sib(.qword, .{
+ .base = tmp.to64(),
+ .scale_index = .{ .index = dst.to64(), .scale = 1 << 1 },
+ }),
+ );
+ // dst = ((temp3 >> 1) & 0x55...55) + ((temp3 & 0x55...55) << 1)
+ }
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ switch (self.regExtraBits(src_ty)) {
+ 0 => {},
+ else => |extra| try self.genBinOpMir(
+ if (src_ty.isSignedInt()) .sar else .shr,
+ src_ty,
+ dst_mcv,
+ .{ .immediate = extra },
+ ),
+ }
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ un_op, .none, .none });
+ _ = un_op;
+ return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn reuseOperand(
@@ -3559,10 +3527,7 @@ fn reuseOperand(
// Prevent the operand-death processing code from deallocating it.
self.liveness.clearOperandDeath(inst, op_index);
-
- // That makes us responsible for doing the rest of the stuff that processDeath would have done.
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacity(Air.refToIndex(operand).?, .dead);
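+ // The tracked value now hands itself over to the reusing instruction
+ // directly, replacing the manual .dead entry in the branch table.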
+ self.getResolvedInstValue(Air.refToIndex(operand).?).reuse(self);
return true;
}
@@ -3703,9 +3668,6 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
const ptr = try self.resolveInst(ty_op.operand);
- const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
- if (self.liveness.isUnused(inst) and !is_volatile) break :result .dead;
-
const dst_mcv: MCValue = if (elem_size <= 8 and self.reuseOperand(inst, ty_op.operand, 0, ptr))
// The MCValue that holds the pointer can be re-used as the value.
ptr
@@ -4002,10 +3964,6 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
}
fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
- if (self.liveness.isUnused(inst)) {
- return MCValue.dead;
- }
-
const mcv = try self.resolveInst(operand);
const ptr_ty = self.air.typeOf(operand);
const container_ty = ptr_ty.childType();
@@ -4072,7 +4030,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result: MCValue = result: {
const operand = extra.struct_operand;
const index = extra.field_index;
@@ -4220,11 +4178,9 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement airFieldParentPtr for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ _ = ty_op;
+ return self.fail("TODO implement airFieldParentPtr for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
@@ -5443,9 +5399,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
+ const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
const dst_mcv: MCValue = switch (mcv) {
.register => |reg| blk: {
self.register_manager.getRegAssumeFree(reg.to64(), inst);
@@ -5536,23 +5490,17 @@ fn airBreakpoint(self: *Self) !void {
}
fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const dst_mcv = try self.allocRegOrMem(inst, true);
- try self.setRegOrMem(Type.usize, dst_mcv, .{
- .stack_offset = -@as(i32, @divExact(self.target.cpu.arch.ptrBitWidth(), 8)),
- });
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ .none, .none, .none });
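+ // The return address sits just above the saved rbp; given this backend's
+ // stack_offset convention (offsets are applied downward from rbp), a
+ // negative offset of one pointer size presumably addresses that slot.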
+ const dst_mcv = try self.allocRegOrMem(inst, true);
+ try self.setRegOrMem(Type.usize, dst_mcv, .{
+ .stack_offset = -@as(i32, @divExact(self.target.cpu.arch.ptrBitWidth(), 8)),
+ });
+ return self.finishAir(inst, dst_mcv, .{ .none, .none, .none });
}
fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const dst_mcv = try self.allocRegOrMem(inst, true);
- try self.setRegOrMem(Type.usize, dst_mcv, .{ .register = .rbp });
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ .none, .none, .none });
+ const dst_mcv = try self.allocRegOrMem(inst, true);
+ try self.setRegOrMem(Type.usize, dst_mcv, .{ .register = .rbp });
+ return self.finishAir(inst, dst_mcv, .{ .none, .none, .none });
}
fn airFence(self: *Self, inst: Air.Inst.Index) !void {
@@ -5749,7 +5697,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
+ if (self.liveness.isUnused(inst)) break :result .unreach;
switch (info.return_value) {
.register => {
@@ -5771,12 +5719,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
std.mem.copy(Air.Inst.Ref, buf[1..], args);
return self.finishAir(inst, result, buf);
}
- var bt = try self.iterateBigTomb(inst, 1 + args.len);
- bt.feed(callee);
- for (args) |arg| {
- bt.feed(arg);
- }
- return bt.finishAir(result);
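+ // When the callee plus arguments exceed finishAir's fixed operand slots,
+ // the operand deaths are processed through Liveness's big tomb iterator,
+ // feeding each operand individually.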
+ var bt = self.liveness.iterateBigTomb(inst);
+ self.feed(&bt, callee);
+ for (args) |arg| self.feed(&bt, arg);
+ return self.finishAirResult(inst, result);
}
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
@@ -5799,7 +5745,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
// which is available if the jump is 127 bytes or less forward.
const jmp_reloc = try self.asmJmpReloc(undefined);
try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
- return self.finishAir(inst, .dead, .{ un_op, .none, .none });
+ return self.finishAir(inst, .unreach, .{ un_op, .none, .none });
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
@@ -5829,65 +5775,63 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// which is available if the jump is 127 bytes or less forward.
const jmp_reloc = try self.asmJmpReloc(undefined);
try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
- return self.finishAir(inst, .dead, .{ un_op, .none, .none });
+ return self.finishAir(inst, .unreach, .{ un_op, .none, .none });
}
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ty = self.air.typeOf(bin_op.lhs);
- const ty_abi_size = ty.abiSize(self.target.*);
- const can_reuse = ty_abi_size <= 8;
+ const ty = self.air.typeOf(bin_op.lhs);
+ const ty_abi_size = ty.abiSize(self.target.*);
+ const can_reuse = ty_abi_size <= 8;
- try self.spillEflagsIfOccupied();
- self.eflags_inst = inst;
+ try self.spillEflagsIfOccupied();
+ self.eflags_inst = inst;
- const lhs_mcv = try self.resolveInst(bin_op.lhs);
- const lhs_lock = switch (lhs_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+ const lhs_mcv = try self.resolveInst(bin_op.lhs);
+ const lhs_lock = switch (lhs_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
- const rhs_mcv = try self.resolveInst(bin_op.rhs);
- const rhs_lock = switch (rhs_mcv) {
- .register => |reg| self.register_manager.lockReg(reg),
- else => null,
- };
- defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
-
- const dst_mem_ok = !ty.isRuntimeFloat();
- var flipped = false;
- const dst_mcv: MCValue = if (can_reuse and !lhs_mcv.isImmediate() and
- (dst_mem_ok or lhs_mcv.isRegister()) and self.liveness.operandDies(inst, 0))
- lhs_mcv
- else if (can_reuse and !rhs_mcv.isImmediate() and
- (dst_mem_ok or rhs_mcv.isRegister()) and self.liveness.operandDies(inst, 1))
- dst: {
- flipped = true;
- break :dst rhs_mcv;
- } else if (dst_mem_ok) dst: {
- const dst_mcv = try self.allocTempRegOrMem(ty, true);
- try self.setRegOrMem(ty, dst_mcv, lhs_mcv);
- break :dst dst_mcv;
- } else .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) };
- const dst_lock = switch (dst_mcv) {
- .register => |reg| self.register_manager.lockReg(reg),
- else => null,
- };
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ const rhs_mcv = try self.resolveInst(bin_op.rhs);
+ const rhs_lock = switch (rhs_mcv) {
+ .register => |reg| self.register_manager.lockReg(reg),
+ else => null,
+ };
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+
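+ // Prefer reusing a dying operand as the cmp destination; when the rhs is
+ // the one reused, the operands are swapped and the compare operator is
+ // reversed below to compensate.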
+ const dst_mem_ok = !ty.isRuntimeFloat();
+ var flipped = false;
+ const dst_mcv: MCValue = if (can_reuse and !lhs_mcv.isImmediate() and
+ (dst_mem_ok or lhs_mcv.isRegister()) and self.liveness.operandDies(inst, 0))
+ lhs_mcv
+ else if (can_reuse and !rhs_mcv.isImmediate() and
+ (dst_mem_ok or rhs_mcv.isRegister()) and self.liveness.operandDies(inst, 1))
+ dst: {
+ flipped = true;
+ break :dst rhs_mcv;
+ } else if (dst_mem_ok) dst: {
+ const dst_mcv = try self.allocTempRegOrMem(ty, true);
+ try self.setRegOrMem(ty, dst_mcv, lhs_mcv);
+ break :dst dst_mcv;
+ } else .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) };
+ const dst_lock = switch (dst_mcv) {
+ .register => |reg| self.register_manager.lockReg(reg),
+ else => null,
+ };
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const src_mcv = if (flipped) lhs_mcv else rhs_mcv;
- try self.genBinOpMir(switch (ty.tag()) {
- else => .cmp,
- .f32 => .ucomiss,
- .f64 => .ucomisd,
- }, ty, dst_mcv, src_mcv);
+ const src_mcv = if (flipped) lhs_mcv else rhs_mcv;
+ try self.genBinOpMir(switch (ty.tag()) {
+ else => .cmp,
+ .f32 => .ucomiss,
+ .f64 => .ucomisd,
+ }, ty, dst_mcv, src_mcv);
- const signedness = if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned;
- break :result .{
- .eflags = Condition.fromCompareOperator(signedness, if (flipped) op.reverse() else op),
- };
+ const signedness = if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned;
+ const result = MCValue{
+ .eflags = Condition.fromCompareOperator(signedness, if (flipped) op.reverse() else op),
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -5899,56 +5843,55 @@ fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const addr_reg = try self.register_manager.allocReg(null, gp);
- const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
- defer self.register_manager.unlockReg(addr_lock);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
- try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
- .base = .ds,
- .disp = @intCast(i32, got_addr),
- }));
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom_index = try macho_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
- try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
- } else {
- return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)});
- }
- try self.spillEflagsIfOccupied();
- self.eflags_inst = inst;
+ const addr_reg = try self.register_manager.allocReg(null, gp);
+ const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
+ defer self.register_manager.unlockReg(addr_lock);
- const op_ty = self.air.typeOf(un_op);
- const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*));
- const op_mcv = try self.resolveInst(un_op);
- const dst_reg = switch (op_mcv) {
- .register => |reg| reg,
- else => try self.copyToTmpRegister(op_ty, op_mcv),
- };
- try self.asmRegisterMemory(
- .cmp,
- registerAlias(dst_reg, op_abi_size),
- Memory.sib(Memory.PtrSize.fromSize(op_abi_size), .{ .base = addr_reg }),
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
);
- break :result .{ .eflags = .b };
+ const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
+ try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
+ .base = .ds,
+ .disp = @intCast(i32, got_addr),
+ }));
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
+ try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index = try macho_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
+ try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
+ } else {
+ return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)});
+ }
+
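+ // The result lives in eflags, so evict whatever instruction currently
+ // occupies them before claiming them for this one.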
+ try self.spillEflagsIfOccupied();
+ self.eflags_inst = inst;
+
+ const op_ty = self.air.typeOf(un_op);
+ const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*));
+ const op_mcv = try self.resolveInst(un_op);
+ const dst_reg = switch (op_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(op_ty, op_mcv),
};
+ try self.asmRegisterMemory(
+ .cmp,
+ registerAlias(dst_reg, op_abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(op_abi_size), .{ .base = addr_reg }),
+ );
+ const result = MCValue{ .eflags = .b };
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -5957,9 +5900,8 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const err_union_ty = self.air.typeOf(pl_op.operand);
- const err_union = try self.resolveInst(pl_op.operand);
- const result = try self.genTry(inst, err_union, body, err_union_ty, false);
- return self.finishAir(inst, result, .{ pl_op.operand, .none, .none });
+ const result = try self.genTry(inst, pl_op.operand, body, err_union_ty, false);
+ return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
@@ -5967,15 +5909,14 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const err_union_ty = self.air.typeOf(extra.data.ptr).childType();
- const err_union_ptr = try self.resolveInst(extra.data.ptr);
- const result = try self.genTry(inst, err_union_ptr, body, err_union_ty, true);
- return self.finishAir(inst, result, .{ extra.data.ptr, .none, .none });
+ const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true);
+ return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn genTry(
self: *Self,
inst: Air.Inst.Index,
- err_union: MCValue,
+ err_union: Air.Inst.Ref,
body: []const Air.Inst.Index,
err_union_ty: Type,
operand_is_ptr: bool,
@@ -5983,14 +5924,37 @@ fn genTry(
if (operand_is_ptr) {
return self.fail("TODO genTry for pointers", .{});
}
- const is_err_mcv = try self.isErr(null, err_union_ty, err_union);
+ const liveness_cond_br = self.liveness.getCondBr(inst);
+
+ const err_union_mcv = try self.resolveInst(err_union);
+ const is_err_mcv = try self.isErr(null, err_union_ty, err_union_mcv);
+
const reloc = try self.genCondBrMir(Type.anyerror, is_err_mcv);
+
+ if (self.liveness.operandDies(inst, 0)) {
+ if (Air.refToIndex(err_union)) |err_union_inst| self.processDeath(err_union_inst);
+ }
+
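+ // Save the state before the error branch so that instruction tracking
+ // can be rewound (resurrect = true below) once its body is generated.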
+ self.scope_generation += 1;
+ const state = try self.saveState();
+
+ for (liveness_cond_br.else_deaths) |operand| self.processDeath(operand);
try self.genBody(body);
+ try self.restoreState(state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
+
try self.performReloc(reloc);
+
+ for (liveness_cond_br.then_deaths) |operand| self.processDeath(operand);
+
const result = if (self.liveness.isUnused(inst))
- .dead
+ .unreach
else
- try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, err_union);
+ try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, err_union_mcv);
return result;
}
@@ -6013,12 +5977,12 @@ fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
// TODO emit debug info for function change
_ = function;
- return self.finishAir(inst, .dead, .{ .none, .none, .none });
+ return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void {
// TODO emit debug info lexical block
- return self.finishAir(inst, .dead, .{ .none, .none, .none });
+ return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
@@ -6034,7 +5998,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
try self.genVarDbgInfo(tag, ty, mcv, name);
- return self.finishAir(inst, .dead, .{ operand, .none, .none });
+ return self.finishAir(inst, .unreach, .{ operand, .none, .none });
}
fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
@@ -6071,7 +6035,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- const liveness_condbr = self.liveness.getCondBr(inst);
+ const liveness_cond_br = self.liveness.getCondBr(inst);
const reloc = try self.genCondBrMir(cond_ty, cond);
@@ -6082,60 +6046,37 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
if (Air.refToIndex(pl_op.operand)) |op_inst| self.processDeath(op_inst);
}
- // Capture the state of register and stack allocation state so that we can revert to it.
- const saved_state = self.captureState();
-
+ const outer_state = try self.saveState();
{
- try self.branch_stack.append(.{});
- errdefer _ = self.branch_stack.pop();
+ self.scope_generation += 1;
+ const inner_state = try self.saveState();
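+ // Both branches are generated starting from inner_state; after each
+ // body, tracking is rewound without emitting code, since the two
+ // paths have not converged yet.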
- try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
- for (liveness_condbr.then_deaths) |operand| {
- self.processDeath(operand);
- }
+ for (liveness_cond_br.then_deaths) |operand| self.processDeath(operand);
try self.genBody(then_body);
- }
-
- // Revert to the previous register and stack allocation state.
-
- var then_branch = self.branch_stack.pop();
- defer then_branch.deinit(self.gpa);
-
- self.revertState(saved_state);
-
- try self.performReloc(reloc);
+ try self.restoreState(inner_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
- {
- try self.branch_stack.append(.{});
- errdefer _ = self.branch_stack.pop();
+ try self.performReloc(reloc);
- try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len);
- for (liveness_condbr.else_deaths) |operand| {
- self.processDeath(operand);
- }
+ for (liveness_cond_br.else_deaths) |operand| self.processDeath(operand);
try self.genBody(else_body);
+ try self.restoreState(inner_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
}
-
- var else_branch = self.branch_stack.pop();
- defer else_branch.deinit(self.gpa);
-
- // At this point, each branch will possibly have conflicting values for where
- // each instruction is stored. They agree, however, on which instructions are alive/dead.
- // We use the first ("then") branch as canonical, and here emit
- // instructions into the second ("else") branch to make it conform.
- // We continue respect the data structure semantic guarantees of the else_branch so
- // that we can use all the code emitting abstractions. This is why at the bottom we
- // assert that parent_branch.free_registers equals the saved_then_branch.free_registers
- // rather than assigning it.
- log.debug("airCondBr: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
- }
- log.debug("Then branch: {}", .{then_branch.fmtDebug()});
- log.debug("Else branch: {}", .{else_branch.fmtDebug()});
-
- try self.canonicaliseBranches(true, &then_branch, &else_branch, true, true);
+ try self.restoreState(outer_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = false,
+ .resurrect = false,
+ .close_scope = true,
+ });
// We already took care of pl_op.operand earlier, so we're going
// to pass .none here
@@ -6309,67 +6250,53 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCVa
fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNull(inst, ty, operand);
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = try self.isNull(inst, ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNullPtr(inst, ty, operand);
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = try self.isNullPtr(inst, ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result switch (try self.isNull(inst, ty, operand)) {
- .eflags => |cc| .{ .eflags = cc.negate() },
- else => unreachable,
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result: MCValue = switch (try self.isNull(inst, ty, operand)) {
+ .eflags => |cc| .{ .eflags = cc.negate() },
+ else => unreachable,
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result switch (try self.isNullPtr(inst, ty, operand)) {
- .eflags => |cc| .{ .eflags = cc.negate() },
- else => unreachable,
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result: MCValue = switch (try self.isNullPtr(inst, ty, operand)) {
+ .eflags => |cc| .{ .eflags = cc.negate() },
+ else => unreachable,
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isErr(inst, ty, operand);
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = try self.isErr(inst, ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ un_op, .none, .none });
- }
-
const operand_ptr = try self.resolveInst(un_op);
const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -6395,21 +6322,15 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNonErr(inst, ty, operand);
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = try self.isNonErr(inst, ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ un_op, .none, .none });
- }
-
const operand_ptr = try self.resolveInst(un_op);
const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -6439,103 +6360,61 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
const jmp_target = @intCast(u32, self.mir_instructions.len);
- const liveness_loop = self.liveness.getLoop(inst);
- {
- try self.branch_stack.append(.{});
- errdefer _ = self.branch_stack.pop();
-
- try self.genBody(body);
- }
-
- var branch = self.branch_stack.pop();
- defer branch.deinit(self.gpa);
-
- log.debug("airLoop: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
- }
- log.debug("Loop branch: {}", .{branch.fmtDebug()});
-
- var dummy_branch = Branch{};
- defer dummy_branch.deinit(self.gpa);
- try self.canonicaliseBranches(true, &dummy_branch, &branch, true, false);
+ self.scope_generation += 1;
+ const state = try self.saveState();
+ try self.genBody(body);
+ try self.restoreState(state, &.{}, .{
+ .emit_instructions = true,
+ .update_tracking = false,
+ .resurrect = false,
+ .close_scope = true,
+ });
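+ // Unlike the branches above, this restore emits instructions, so any
+ // fixup code lands before the back-edge jump below.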
_ = try self.asmJmpReloc(jmp_target);
- try self.ensureProcessDeathCapacity(liveness_loop.deaths.len);
- for (liveness_loop.deaths) |operand| {
- self.processDeath(operand);
- }
-
return self.finishAirBookkeeping();
}
fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
// A block is a setup that allows jumping to its end.
- const branch_depth = @intCast(u32, self.branch_stack.items.len);
- try self.blocks.putNoClobber(self.gpa, inst, .{ .branch_depth = branch_depth });
- defer {
- var block_data = self.blocks.fetchRemove(inst).?.value;
- block_data.deinit(self.gpa);
- }
-
- const ty = self.air.typeOfIndex(inst);
- const unused = !ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(inst);
-
- {
- // Here we use `.none` to represent a null value so that the first break
- // instruction will choose a MCValue for the block result and overwrite
- // this field. Following break instructions will use that MCValue to put
- // their block results.
- const result: MCValue = if (unused) .dead else .none;
- const branch = &self.branch_stack.items[branch_depth - 1];
- try branch.inst_table.putNoClobber(self.gpa, inst, result);
- }
+ self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(.unreach));
- {
- try self.branch_stack.append(.{});
- errdefer _ = self.branch_stack.pop();
-
- const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const extra = self.air.extraData(Air.Block, ty_pl.payload);
- const body = self.air.extra[extra.end..][0..extra.data.body_len];
- try self.genBody(body);
- }
+ self.scope_generation += 1;
+ try self.blocks.putNoClobber(self.gpa, inst, .{ .state = self.initRetroactiveState() });
+ const liveness = self.liveness.getBlock(inst);
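+ // The state at the block's merge point is captured retroactively by
+ // the first br targeting it (see airBr) and restored below once the
+ // body has been generated.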
- const block_data = self.blocks.getPtr(inst).?;
- const target_branch = self.branch_stack.pop();
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Block, ty_pl.payload);
+ const body = self.air.extra[extra.end..][0..extra.data.body_len];
+ try self.genBody(body);
- log.debug("airBlock: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
+ var block_data = self.blocks.fetchRemove(inst).?;
+ defer block_data.value.deinit(self.gpa);
+ if (block_data.value.relocs.items.len > 0) {
+ try self.restoreState(block_data.value.state, liveness.deaths, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
+ for (block_data.value.relocs.items) |reloc| try self.performReloc(reloc);
}
- log.debug("Block branch: {}", .{block_data.branch.fmtDebug()});
- log.debug("Target branch: {}", .{target_branch.fmtDebug()});
-
- try self.canonicaliseBranches(true, &block_data.branch, &target_branch, false, false);
- for (block_data.relocs.items) |reloc| try self.performReloc(reloc);
-
- const result = if (unused) .dead else self.getResolvedInstValue(inst).?.*;
- self.getValue(result, inst);
+ const tracking = self.inst_tracking.getPtr(inst).?;
+ if (self.liveness.isUnused(inst)) tracking.die(self);
+ self.getValue(tracking.short, inst);
self.finishAirBookkeeping();
}
-fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
+fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const condition = try self.resolveInst(pl_op.operand);
const condition_ty = self.air.typeOf(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
var extra_index: usize = switch_br.end;
var case_i: u32 = 0;
- const liveness = try self.liveness.getSwitchBr(
- self.gpa,
- inst,
- switch_br.data.cases_len + 1,
- );
+ const liveness = try self.liveness.getSwitchBr(self.gpa, inst, switch_br.data.cases_len + 1);
defer self.gpa.free(liveness.deaths);
// If the condition dies here in this switch instruction, process
@@ -6545,186 +6424,69 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
if (Air.refToIndex(pl_op.operand)) |op_inst| self.processDeath(op_inst);
}
- log.debug("airSwitch: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
- }
-
- var prev_branch: ?Branch = null;
- defer if (prev_branch) |*branch| branch.deinit(self.gpa);
-
- // Capture the state of register and stack allocation state so that we can revert to it.
- const saved_state = self.captureState();
-
- const cases_len = switch_br.data.cases_len + @boolToInt(switch_br.data.else_body_len > 0);
- while (case_i < switch_br.data.cases_len) : (case_i += 1) {
- const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
- const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
- extra_index = case.end + items.len + case_body.len;
-
- // Revert to the previous register and stack allocation state.
- if (prev_branch) |_| self.revertState(saved_state);
-
- var relocs = try self.gpa.alloc(u32, items.len);
- defer self.gpa.free(relocs);
-
- for (items, relocs) |item, *reloc| {
- try self.spillEflagsIfOccupied();
- const item_mcv = try self.resolveInst(item);
- try self.genBinOpMir(.cmp, condition_ty, condition, item_mcv);
- reloc.* = try self.asmJccReloc(undefined, .ne);
- }
+ const outer_state = try self.saveState();
+ {
+ self.scope_generation += 1;
+ const inner_state = try self.saveState();
+
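+ // Every case is generated from inner_state; tracking is rewound after
+ // each body so the next case starts from identical machine state.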
+ while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+ const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items = @ptrCast(
+ []const Air.Inst.Ref,
+ self.air.extra[case.end..][0..case.data.items_len],
+ );
+ const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
+ extra_index = case.end + items.len + case_body.len;
- {
- if (cases_len > 1) try self.branch_stack.append(.{});
- errdefer _ = if (cases_len > 1) self.branch_stack.pop();
+ var relocs = try self.gpa.alloc(u32, items.len);
+ defer self.gpa.free(relocs);
- try self.ensureProcessDeathCapacity(liveness.deaths[case_i].len);
- for (liveness.deaths[case_i]) |operand| {
- self.processDeath(operand);
+ for (items, relocs) |item, *reloc| {
+ try self.spillEflagsIfOccupied();
+ const item_mcv = try self.resolveInst(item);
+ try self.genBinOpMir(.cmp, condition_ty, condition, item_mcv);
+ reloc.* = try self.asmJccReloc(undefined, .ne);
}
- try self.genBody(case_body);
- }
+ for (liveness.deaths[case_i]) |operand| self.processDeath(operand);
- // Consolidate returned MCValues between prongs like we do in airCondBr.
- if (cases_len > 1) {
- var case_branch = self.branch_stack.pop();
- errdefer case_branch.deinit(self.gpa);
+ try self.genBody(case_body);
+ try self.restoreState(inner_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
- log.debug("Case-{d} branch: {}", .{ case_i, case_branch.fmtDebug() });
- const final = case_i == cases_len - 1;
- if (prev_branch) |*canon_branch| {
- try self.canonicaliseBranches(final, canon_branch, &case_branch, true, true);
- canon_branch.deinit(self.gpa);
- }
- prev_branch = case_branch;
+ for (relocs) |reloc| try self.performReloc(reloc);
}
- for (relocs) |reloc| try self.performReloc(reloc);
- }
-
- if (switch_br.data.else_body_len > 0) {
- const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
-
- // Revert to the previous register and stack allocation state.
- if (prev_branch) |_| self.revertState(saved_state);
-
- {
- if (cases_len > 1) try self.branch_stack.append(.{});
- errdefer _ = if (cases_len > 1) self.branch_stack.pop();
+ if (switch_br.data.else_body_len > 0) {
+ const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
const else_deaths = liveness.deaths.len - 1;
- try self.ensureProcessDeathCapacity(liveness.deaths[else_deaths].len);
- for (liveness.deaths[else_deaths]) |operand| {
- self.processDeath(operand);
- }
+ for (liveness.deaths[else_deaths]) |operand| self.processDeath(operand);
try self.genBody(else_body);
- }
-
- // Consolidate returned MCValues between a prong and the else branch like we do in airCondBr.
- if (cases_len > 1) {
- var else_branch = self.branch_stack.pop();
- errdefer else_branch.deinit(self.gpa);
-
- log.debug("Else branch: {}", .{else_branch.fmtDebug()});
- if (prev_branch) |*canon_branch| {
- try self.canonicaliseBranches(true, canon_branch, &else_branch, true, true);
- canon_branch.deinit(self.gpa);
- }
- prev_branch = else_branch;
+ try self.restoreState(inner_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
}
}
+ try self.restoreState(outer_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = false,
+ .resurrect = false,
+ .close_scope = true,
+ });
// We already took care of pl_op.operand earlier, so we're going to pass .none here
return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
-fn canonicaliseBranches(
- self: *Self,
- update_parent: bool,
- canon_branch: *Branch,
- target_branch: *const Branch,
- comptime set_values: bool,
- comptime assert_same_deaths: bool,
-) !void {
- var hazard_map = std.AutoHashMap(MCValue, void).init(self.gpa);
- defer hazard_map.deinit();
-
- const parent_branch =
- if (update_parent) &self.branch_stack.items[self.branch_stack.items.len - 1] else undefined;
-
- if (update_parent) try self.ensureProcessDeathCapacity(target_branch.inst_table.count());
- var target_it = target_branch.inst_table.iterator();
- while (target_it.next()) |target_entry| {
- const target_key = target_entry.key_ptr.*;
- const target_value = target_entry.value_ptr.*;
- const canon_mcv = if (canon_branch.inst_table.fetchSwapRemove(target_key)) |canon_entry| blk: {
- // The instruction's MCValue is overridden in both branches.
- if (target_value == .dead) {
- if (update_parent) {
- parent_branch.inst_table.putAssumeCapacity(target_key, .dead);
- }
- if (assert_same_deaths) assert(canon_entry.value == .dead);
- continue;
- }
- if (update_parent) {
- parent_branch.inst_table.putAssumeCapacity(target_key, canon_entry.value);
- }
- break :blk canon_entry.value;
- } else blk: {
- if (target_value == .dead) {
- if (update_parent) {
- parent_branch.inst_table.putAssumeCapacity(target_key, .dead);
- }
- continue;
- }
- // The instruction is only overridden in the else branch.
- // If integer overflow occurs, the question is: why wasn't the instruction marked dead?
- break :blk self.getResolvedInstValue(target_key).?.*;
- };
- log.debug("consolidating target_entry %{d} {}=>{}", .{ target_key, target_value, canon_mcv });
- // TODO handle the case where the destination stack offset / register has something
- // going on there.
- assert(!hazard_map.contains(target_value));
- try hazard_map.putNoClobber(canon_mcv, {});
- if (set_values) {
- try self.setRegOrMem(self.air.typeOfIndex(target_key), canon_mcv, target_value);
- } else self.getValue(canon_mcv, target_key);
- self.freeValue(target_value);
- // TODO track the new register / stack allocation
- }
-
- if (update_parent) try self.ensureProcessDeathCapacity(canon_branch.inst_table.count());
- var canon_it = canon_branch.inst_table.iterator();
- while (canon_it.next()) |canon_entry| {
- const canon_key = canon_entry.key_ptr.*;
- const canon_value = canon_entry.value_ptr.*;
- // We already deleted the items from this table that matched the target_branch.
- // So these are all instructions that are only overridden in the canon branch.
- const parent_mcv =
- if (canon_value != .dead) self.getResolvedInstValue(canon_key).?.* else undefined;
- if (canon_value != .dead) {
- log.debug("consolidating canon_entry %{d} {}=>{}", .{ canon_key, parent_mcv, canon_value });
- // TODO handle the case where the destination stack offset / register has something
- // going on there.
- assert(!hazard_map.contains(parent_mcv));
- try hazard_map.putNoClobber(canon_value, {});
- if (set_values) {
- try self.setRegOrMem(self.air.typeOfIndex(canon_key), canon_value, parent_mcv);
- } else self.getValue(canon_value, canon_key);
- self.freeValue(parent_mcv);
- // TODO track the new register / stack allocation
- }
- if (update_parent) {
- parent_branch.inst_table.putAssumeCapacity(canon_key, canon_value);
- }
- }
-}
-
fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
const next_inst = @intCast(u32, self.mir_instructions.len);
switch (self.mir_instructions.items(.tag)[reloc]) {
@@ -6740,76 +6502,53 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
const br = self.air.instructions.items(.data)[inst].br;
- const block = br.block_inst;
-
- // The first break instruction encounters `.none` here and chooses a
- // machine code value for the block result, populating this field.
- // Following break instructions encounter that value and use it for
- // the location to store their block results.
- if (self.getResolvedInstValue(block)) |dst_mcv| {
- const src_mcv = try self.resolveInst(br.operand);
- switch (dst_mcv.*) {
- .none => {
- const result = result: {
- if (self.reuseOperand(inst, br.operand, 0, src_mcv)) break :result src_mcv;
-
- const new_mcv = try self.allocRegOrMem(block, true);
- try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, src_mcv);
- break :result new_mcv;
- };
- dst_mcv.* = result;
- self.freeValue(result);
- },
- else => try self.setRegOrMem(self.air.typeOfIndex(block), dst_mcv.*, src_mcv),
- }
- }
+ const src_mcv = try self.resolveInst(br.operand);
+
+ const block_ty = self.air.typeOfIndex(br.block_inst);
+ const block_unused =
+ !block_ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(br.block_inst);
+ const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
+ const block_data = self.blocks.getPtr(br.block_inst).?;
+
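+ // The first br to reach the block (no relocs yet) chooses the result
+ // location and captures the retroactive state; later brs copy their
+ // operand into that location and emit state fixups instead.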
+ if (block_data.relocs.items.len == 0) {
+ block_tracking.* = InstTracking.init(result: {
+ if (block_unused) break :result .none;
+ if (self.reuseOperand(inst, br.operand, 0, src_mcv)) {
+ // Fix instruction tracking
+ switch (src_mcv) {
+ .register => |reg| if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
+ self.register_manager.registers[index] = br.block_inst;
+ },
+ else => {},
+ }
+ break :result src_mcv;
+ }
+
+ const new_mcv = try self.allocRegOrMem(br.block_inst, true);
+ try self.setRegOrMem(block_ty, new_mcv, src_mcv);
+ break :result new_mcv;
+ });
+ } else if (!block_unused) try self.setRegOrMem(block_ty, block_tracking.short, src_mcv);
- // Process operand death early so that it is properly accounted for in the Branch below.
+ // Process operand death so that it is properly accounted for in the State below.
if (self.liveness.operandDies(inst, 0)) {
if (Air.refToIndex(br.operand)) |op_inst| self.processDeath(op_inst);
}
- const block_data = self.blocks.getPtr(block).?;
- {
- var branch = Branch{};
- errdefer branch.deinit(self.gpa);
-
- var branch_i = self.branch_stack.items.len - 1;
- while (branch_i >= block_data.branch_depth) : (branch_i -= 1) {
- const table = &self.branch_stack.items[branch_i].inst_table;
- try branch.inst_table.ensureUnusedCapacity(self.gpa, table.count());
- var it = table.iterator();
- while (it.next()) |entry| {
- // This loop could be avoided by tracking inst depth, which
- // will be needed later anyway for reusing loop deaths.
- var parent_branch_i = block_data.branch_depth - 1;
- while (parent_branch_i > 0) : (parent_branch_i -= 1) {
- const parent_table = &self.branch_stack.items[parent_branch_i].inst_table;
- if (parent_table.contains(entry.key_ptr.*)) break;
- } else continue;
- const gop = branch.inst_table.getOrPutAssumeCapacity(entry.key_ptr.*);
- if (!gop.found_existing) gop.value_ptr.* = entry.value_ptr.*;
- }
- }
-
- log.debug("airBr: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
- }
- log.debug("Prev branch: {}", .{block_data.branch.fmtDebug()});
- log.debug("Cur branch: {}", .{branch.fmtDebug()});
-
- try self.canonicaliseBranches(false, &block_data.branch, &branch, true, false);
- block_data.branch.deinit(self.gpa);
- block_data.branch = branch;
- }
+ if (block_data.relocs.items.len == 0) {
+ try self.saveRetroactiveState(&block_data.state);
+ block_tracking.die(self);
+ } else try self.restoreState(block_data.state, &.{}, .{
+ .emit_instructions = true,
+ .update_tracking = false,
+ .resurrect = false,
+ .close_scope = false,
+ });
// Emit a jump with a relocation. It will be patched up after the block ends.
- try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
// Leave the jump offset undefined
const jmp_reloc = try self.asmJmpReloc(undefined);
- block_data.relocs.appendAssumeCapacity(jmp_reloc);
+ try block_data.relocs.append(self.gpa, jmp_reloc);
self.finishAirBookkeeping();
}
@@ -6817,7 +6556,6 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
const clobbers_len = @truncate(u31, extra.data.flags);
var extra_i: usize = extra.end;
const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
@@ -6826,216 +6564,214 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
extra_i += inputs.len;
var result: MCValue = .none;
- if (!is_volatile and self.liveness.isUnused(inst)) result = .dead else {
- var args = std.StringArrayHashMap(MCValue).init(self.gpa);
- try args.ensureTotalCapacity(outputs.len + inputs.len + clobbers_len);
- defer {
- for (args.values()) |arg| switch (arg) {
- .register => |reg| self.register_manager.unlockReg(.{ .register = reg }),
- else => {},
- };
- args.deinit();
- }
+ var args = std.StringArrayHashMap(MCValue).init(self.gpa);
+ try args.ensureTotalCapacity(outputs.len + inputs.len + clobbers_len);
+ defer {
+ for (args.values()) |arg| switch (arg) {
+ .register => |reg| self.register_manager.unlockReg(.{ .register = reg }),
+ else => {},
+ };
+ args.deinit();
+ }
+
+ if (outputs.len > 1) {
+ return self.fail("TODO implement codegen for asm with more than 1 output", .{});
+ }
- if (outputs.len > 1) {
- return self.fail("TODO implement codegen for asm with more than 1 output", .{});
+ for (outputs) |output| {
+ if (output != .none) {
+ return self.fail("TODO implement codegen for non-expr asm", .{});
+ }
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+
+ const mcv: MCValue = if (mem.eql(u8, constraint, "=r"))
+ .{ .register = self.register_manager.tryAllocReg(inst, gp) orelse
+ return self.fail("ran out of registers lowering inline asm", .{}) }
+ else if (mem.startsWith(u8, constraint, "={") and mem.endsWith(u8, constraint, "}"))
+ .{ .register = parseRegName(constraint["={".len .. constraint.len - "}".len]) orelse
+ return self.fail("unrecognized register constraint: '{s}'", .{constraint}) }
+ else
+ return self.fail("unrecognized constraint: '{s}'", .{constraint});
+ args.putAssumeCapacity(name, mcv);
+ switch (mcv) {
+ .register => |reg| _ = if (RegisterManager.indexOfRegIntoTracked(reg)) |_|
+ self.register_manager.lockRegAssumeUnused(reg),
+ else => {},
}
+ if (output == .none) result = mcv;
+ }
- for (outputs) |output| {
- if (output != .none) {
- return self.fail("TODO implement codegen for non-expr asm", .{});
- }
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
- const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
- // This equation accounts for the fact that even if we have exactly 4 bytes
- // for the string, we still use the next u32 for the null terminator.
- extra_i += (constraint.len + name.len + (2 + 3)) / 4;
-
- const mcv: MCValue = if (mem.eql(u8, constraint, "=r"))
- .{ .register = self.register_manager.tryAllocReg(inst, gp) orelse
- return self.fail("ran out of registers lowering inline asm", .{}) }
- else if (mem.startsWith(u8, constraint, "={") and mem.endsWith(u8, constraint, "}"))
- .{ .register = parseRegName(constraint["={".len .. constraint.len - "}".len]) orelse
- return self.fail("unrecognized register constraint: '{s}'", .{constraint}) }
- else
- return self.fail("unrecognized constraint: '{s}'", .{constraint});
- args.putAssumeCapacity(name, mcv);
- switch (mcv) {
- .register => |reg| _ = if (RegisterManager.indexOfRegIntoTracked(reg)) |_|
- self.register_manager.lockRegAssumeUnused(reg),
- else => {},
- }
- if (output == .none) result = mcv;
+ for (inputs) |input| {
+ const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const constraint = std.mem.sliceTo(input_bytes, 0);
+ const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+
+ if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
+ return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
}
+ const reg_name = constraint[1 .. constraint.len - 1];
+ const reg = parseRegName(reg_name) orelse
+ return self.fail("unrecognized register: '{s}'", .{reg_name});
+
+ const arg_mcv = try self.resolveInst(input);
+ try self.register_manager.getReg(reg, null);
+ try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+ }
- for (inputs) |input| {
- const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(input_bytes, 0);
- const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
+ {
+ var clobber_i: u32 = 0;
+ while (clobber_i < clobbers_len) : (clobber_i += 1) {
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
- extra_i += (constraint.len + name.len + (2 + 3)) / 4;
-
- if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
- return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
- }
- const reg_name = constraint[1 .. constraint.len - 1];
- const reg = parseRegName(reg_name) orelse
- return self.fail("unrecognized register: '{s}'", .{reg_name});
+ extra_i += clobber.len / 4 + 1;
- const arg_mcv = try self.resolveInst(input);
- try self.register_manager.getReg(reg, null);
- try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
- }
-
- {
- var clobber_i: u32 = 0;
- while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
- // This equation accounts for the fact that even if we have exactly 4 bytes
- // for the string, we still use the next u32 for the null terminator.
- extra_i += clobber.len / 4 + 1;
-
- // TODO honor these
- }
+ // TODO honor these
}
+ }
- const asm_source = mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
- var line_it = mem.tokenize(u8, asm_source, "\n\r;");
- while (line_it.next()) |line| {
- var mnem_it = mem.tokenize(u8, line, " \t");
- const mnem_str = mnem_it.next() orelse continue;
- if (mem.startsWith(u8, mnem_str, "#")) continue;
-
- const mnem_size: ?Memory.PtrSize = if (mem.endsWith(u8, mnem_str, "b"))
- .byte
- else if (mem.endsWith(u8, mnem_str, "w"))
- .word
- else if (mem.endsWith(u8, mnem_str, "l"))
- .dword
- else if (mem.endsWith(u8, mnem_str, "q"))
- .qword
- else
- null;
- const mnem = std.meta.stringToEnum(Mir.Inst.Tag, mnem_str) orelse
- (if (mnem_size) |_|
- std.meta.stringToEnum(Mir.Inst.Tag, mnem_str[0 .. mnem_str.len - 1])
- else
- null) orelse return self.fail("Invalid mnemonic: '{s}'", .{mnem_str});
-
- var op_it = mem.tokenize(u8, mnem_it.rest(), ",");
- var ops = [1]encoder.Instruction.Operand{.none} ** 4;
- for (&ops) |*op| {
- const op_str = mem.trim(u8, op_it.next() orelse break, " \t");
- if (mem.startsWith(u8, op_str, "#")) break;
- if (mem.startsWith(u8, op_str, "%%")) {
- const colon = mem.indexOfScalarPos(u8, op_str, "%%".len + 2, ':');
- const reg = parseRegName(op_str["%%".len .. colon orelse op_str.len]) orelse
- return self.fail("Invalid register: '{s}'", .{op_str});
- if (colon) |colon_pos| {
- const disp = std.fmt.parseInt(i32, op_str[colon_pos + 1 ..], 0) catch
- return self.fail("Invalid displacement: '{s}'", .{op_str});
- op.* = .{ .mem = Memory.sib(
- mnem_size orelse return self.fail("Unknown size: '{s}'", .{op_str}),
- .{ .base = reg, .disp = disp },
- ) };
- } else {
- if (mnem_size) |size| if (reg.bitSize() != size.bitSize())
- return self.fail("Invalid register size: '{s}'", .{op_str});
- op.* = .{ .reg = reg };
+ const asm_source = mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+ var line_it = mem.tokenize(u8, asm_source, "\n\r;");
+ while (line_it.next()) |line| {
+ var mnem_it = mem.tokenize(u8, line, " \t");
+ const mnem_str = mnem_it.next() orelse continue;
+ if (mem.startsWith(u8, mnem_str, "#")) continue;
+
+ const mnem_size: ?Memory.PtrSize = if (mem.endsWith(u8, mnem_str, "b"))
+ .byte
+ else if (mem.endsWith(u8, mnem_str, "w"))
+ .word
+ else if (mem.endsWith(u8, mnem_str, "l"))
+ .dword
+ else if (mem.endsWith(u8, mnem_str, "q"))
+ .qword
+ else
+ null;
+ const mnem = std.meta.stringToEnum(Mir.Inst.Tag, mnem_str) orelse
+ (if (mnem_size) |_|
+ std.meta.stringToEnum(Mir.Inst.Tag, mnem_str[0 .. mnem_str.len - 1])
+ else
+ null) orelse return self.fail("Invalid mnemonic: '{s}'", .{mnem_str});
+
+ var op_it = mem.tokenize(u8, mnem_it.rest(), ",");
+ var ops = [1]encoder.Instruction.Operand{.none} ** 4;
+ for (&ops) |*op| {
+ const op_str = mem.trim(u8, op_it.next() orelse break, " \t");
+ if (mem.startsWith(u8, op_str, "#")) break;
+ if (mem.startsWith(u8, op_str, "%%")) {
+ const colon = mem.indexOfScalarPos(u8, op_str, "%%".len + 2, ':');
+ const reg = parseRegName(op_str["%%".len .. colon orelse op_str.len]) orelse
+ return self.fail("Invalid register: '{s}'", .{op_str});
+ if (colon) |colon_pos| {
+ const disp = std.fmt.parseInt(i32, op_str[colon_pos + 1 ..], 0) catch
+ return self.fail("Invalid displacement: '{s}'", .{op_str});
+ op.* = .{ .mem = Memory.sib(
+ mnem_size orelse return self.fail("Unknown size: '{s}'", .{op_str}),
+ .{ .base = reg, .disp = disp },
+ ) };
+ } else {
+ if (mnem_size) |size| if (reg.bitSize() != size.bitSize())
+ return self.fail("Invalid register size: '{s}'", .{op_str});
+ op.* = .{ .reg = reg };
+ }
+ } else if (mem.startsWith(u8, op_str, "%[") and mem.endsWith(u8, op_str, "]")) {
+ switch (args.get(op_str["%[".len .. op_str.len - "]".len]) orelse
+ return self.fail("No matching constraint: '{s}'", .{op_str})) {
+ .register => |reg| op.* = .{ .reg = reg },
+ else => return self.fail("Invalid constraint: '{s}'", .{op_str}),
+ }
+ } else if (mem.startsWith(u8, op_str, "$")) {
+ if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| {
+ if (mnem_size) |size| {
+ const max = @as(u64, math.maxInt(u64)) >>
+ @intCast(u6, 64 - (size.bitSize() - 1));
+ if ((if (s < 0) ~s else s) > max)
+ return self.fail("Invalid immediate size: '{s}'", .{op_str});
}
- } else if (mem.startsWith(u8, op_str, "%[") and mem.endsWith(u8, op_str, "]")) {
- switch (args.get(op_str["%[".len .. op_str.len - "]".len]) orelse
- return self.fail("No matching constraint: '{s}'", .{op_str})) {
- .register => |reg| op.* = .{ .reg = reg },
- else => return self.fail("Invalid constraint: '{s}'", .{op_str}),
+ op.* = .{ .imm = Immediate.s(s) };
+ } else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| {
+ if (mnem_size) |size| {
+ const max = @as(u64, math.maxInt(u64)) >>
+ @intCast(u6, 64 - size.bitSize());
+ if (u > max)
+ return self.fail("Invalid immediate size: '{s}'", .{op_str});
}
- } else if (mem.startsWith(u8, op_str, "$")) {
- if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| {
- if (mnem_size) |size| {
- const max = @as(u64, math.maxInt(u64)) >>
- @intCast(u6, 64 - (size.bitSize() - 1));
- if ((if (s < 0) ~s else s) > max)
- return self.fail("Invalid immediate size: '{s}'", .{op_str});
- }
- op.* = .{ .imm = Immediate.s(s) };
- } else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| {
- if (mnem_size) |size| {
- const max = @as(u64, math.maxInt(u64)) >>
- @intCast(u6, 64 - size.bitSize());
- if (u > max)
- return self.fail("Invalid immediate size: '{s}'", .{op_str});
- }
- op.* = .{ .imm = Immediate.u(u) };
- } else |_| return self.fail("Invalid immediate: '{s}'", .{op_str});
- } else return self.fail("Invalid operand: '{s}'", .{op_str});
- } else if (op_it.next()) |op_str| return self.fail("Extra operand: '{s}'", .{op_str});
-
- (switch (ops[0]) {
- .none => self.asmOpOnly(mnem),
- .reg => |reg0| switch (ops[1]) {
- .none => self.asmRegister(mnem, reg0),
- .reg => |reg1| switch (ops[2]) {
- .none => self.asmRegisterRegister(mnem, reg1, reg0),
- .reg => |reg2| switch (ops[3]) {
- .none => self.asmRegisterRegisterRegister(mnem, reg2, reg1, reg0),
- else => error.InvalidInstruction,
- },
- .mem => |mem2| switch (ops[3]) {
- .none => self.asmMemoryRegisterRegister(mnem, mem2, reg1, reg0),
- else => error.InvalidInstruction,
- },
+ op.* = .{ .imm = Immediate.u(u) };
+ } else |_| return self.fail("Invalid immediate: '{s}'", .{op_str});
+ } else return self.fail("Invalid operand: '{s}'", .{op_str});
+ } else if (op_it.next()) |op_str| return self.fail("Extra operand: '{s}'", .{op_str});
+
+ (switch (ops[0]) {
+ .none => self.asmOpOnly(mnem),
+ .reg => |reg0| switch (ops[1]) {
+ .none => self.asmRegister(mnem, reg0),
+ .reg => |reg1| switch (ops[2]) {
+ .none => self.asmRegisterRegister(mnem, reg1, reg0),
+ .reg => |reg2| switch (ops[3]) {
+ .none => self.asmRegisterRegisterRegister(mnem, reg2, reg1, reg0),
else => error.InvalidInstruction,
},
- .mem => |mem1| switch (ops[2]) {
- .none => self.asmMemoryRegister(mnem, mem1, reg0),
+ .mem => |mem2| switch (ops[3]) {
+ .none => self.asmMemoryRegisterRegister(mnem, mem2, reg1, reg0),
else => error.InvalidInstruction,
},
else => error.InvalidInstruction,
},
- .mem => |mem0| switch (ops[1]) {
- .none => self.asmMemory(mnem, mem0),
- .reg => |reg1| switch (ops[2]) {
- .none => self.asmRegisterMemory(mnem, reg1, mem0),
- else => error.InvalidInstruction,
- },
+ .mem => |mem1| switch (ops[2]) {
+ .none => self.asmMemoryRegister(mnem, mem1, reg0),
else => error.InvalidInstruction,
},
- .imm => |imm0| switch (ops[1]) {
- .none => self.asmImmediate(mnem, imm0),
- .reg => |reg1| switch (ops[2]) {
- .none => self.asmRegisterImmediate(mnem, reg1, imm0),
- .reg => |reg2| switch (ops[3]) {
- .none => self.asmRegisterRegisterImmediate(mnem, reg2, reg1, imm0),
- else => error.InvalidInstruction,
- },
- .mem => |mem2| switch (ops[3]) {
- .none => self.asmMemoryRegisterImmediate(mnem, mem2, reg1, imm0),
- else => error.InvalidInstruction,
- },
+ else => error.InvalidInstruction,
+ },
+ .mem => |mem0| switch (ops[1]) {
+ .none => self.asmMemory(mnem, mem0),
+ .reg => |reg1| switch (ops[2]) {
+ .none => self.asmRegisterMemory(mnem, reg1, mem0),
+ else => error.InvalidInstruction,
+ },
+ else => error.InvalidInstruction,
+ },
+ .imm => |imm0| switch (ops[1]) {
+ .none => self.asmImmediate(mnem, imm0),
+ .reg => |reg1| switch (ops[2]) {
+ .none => self.asmRegisterImmediate(mnem, reg1, imm0),
+ .reg => |reg2| switch (ops[3]) {
+ .none => self.asmRegisterRegisterImmediate(mnem, reg2, reg1, imm0),
else => error.InvalidInstruction,
},
- .mem => |mem1| switch (ops[2]) {
- .none => self.asmMemoryImmediate(mnem, mem1, imm0),
+ .mem => |mem2| switch (ops[3]) {
+ .none => self.asmMemoryRegisterImmediate(mnem, mem2, reg1, imm0),
else => error.InvalidInstruction,
},
else => error.InvalidInstruction,
},
- }) catch |err| switch (err) {
- error.InvalidInstruction => return self.fail(
- "Invalid instruction: '{s} {s} {s} {s} {s}'",
- .{
- @tagName(mnem),
- @tagName(ops[0]),
- @tagName(ops[1]),
- @tagName(ops[2]),
- @tagName(ops[3]),
- },
- ),
- else => |e| return e,
- };
- }
+ .mem => |mem1| switch (ops[2]) {
+ .none => self.asmMemoryImmediate(mnem, mem1, imm0),
+ else => error.InvalidInstruction,
+ },
+ else => error.InvalidInstruction,
+ },
+ }) catch |err| switch (err) {
+ error.InvalidInstruction => return self.fail(
+ "Invalid instruction: '{s} {s} {s} {s} {s}'",
+ .{
+ @tagName(mnem),
+ @tagName(ops[0]),
+ @tagName(ops[1]),
+ @tagName(ops[2]),
+ @tagName(ops[3]),
+ },
+ ),
+ else => |e| return e,
+ };
}
simple: {
@@ -7052,25 +6788,10 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
return self.finishAir(inst, result, buf);
}
- var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
- for (outputs) |output| {
- if (output == .none) continue;
-
- bt.feed(output);
- }
- for (inputs) |input| {
- bt.feed(input);
- }
- return bt.finishAir(result);
-}
-
-fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
- try self.ensureProcessDeathCapacity(operand_count + 1);
- return BigTomb{
- .function = self,
- .inst = inst,
- .lbt = self.liveness.iterateBigTomb(inst),
- };
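+ // Too many operands to fit finishAir's inline buffer: fall back to
+ // Liveness big-tomb iteration, feeding each operand death individually.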
+ var bt = self.liveness.iterateBigTomb(inst);
+ for (outputs) |output| if (output != .none) self.feed(&bt, output);
+ for (inputs) |input| self.feed(&bt, input);
+ return self.finishAirResult(inst, result);
}
/// Sets the value without any modifications to register allocation metadata or stack allocation metadata.
@@ -7952,7 +7673,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result = result: {
const src_mcv = try self.resolveInst(un_op);
if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv;
@@ -7966,7 +7687,7 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result = result: {
const operand = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand;
@@ -7991,28 +7712,24 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType();
const array_len = array_ty.arrayLen();
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
- const stack_offset = @intCast(i32, try self.allocMem(inst, 16, 16));
- try self.genSetStack(ptr_ty, stack_offset, ptr, .{});
- try self.genSetStack(Type.u64, stack_offset - 8, .{ .immediate = array_len }, .{});
- break :blk .{ .stack_offset = stack_offset };
- };
+
+ const stack_offset = @intCast(i32, try self.allocMem(inst, 16, 16));
+ try self.genSetStack(ptr_ty, stack_offset, ptr, .{});
+ try self.genSetStack(Type.u64, stack_offset - 8, .{ .immediate = array_len }, .{});
+
+ const result = MCValue{ .stack_offset = stack_offset };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement airIntToFloat for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ _ = ty_op;
+ return self.fail("TODO implement airIntToFloat for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst))
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
const src_ty = self.air.typeOf(ty_op.operand);
const dst_ty = self.air.typeOfIndex(inst);
@@ -8114,7 +7831,7 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
}
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
+ if (self.liveness.isUnused(inst)) break :result .unreach;
if (val_abi_size <= 8) {
self.eflags_inst = inst;
@@ -8212,7 +7929,7 @@ fn atomicOp(
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
- return if (unused) .none else dst_mcv;
+ return if (unused) .unreach else dst_mcv;
},
.loop => _ = if (val_abi_size <= 8) {
const tmp_reg = try self.register_manager.allocReg(null, gp);
@@ -8285,7 +8002,7 @@ fn atomicOp(
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
_ = try self.asmJccReloc(loop, .ne);
- return if (unused) .none else .{ .register = .rax };
+ return if (unused) .unreach else .{ .register = .rax };
} else {
try self.asmRegisterMemory(.mov, .rax, Memory.sib(.qword, .{
.base = ptr_mem.sib.base,
@@ -8354,7 +8071,7 @@ fn atomicOp(
} });
_ = try self.asmJccReloc(loop, .ne);
- if (unused) return .none;
+ if (unused) return .unreach;
const dst_mcv = try self.allocTempRegOrMem(val_ty, false);
try self.asmMemoryRegister(
.mov,
@@ -8396,27 +8113,22 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
- const ptr_ty = self.air.typeOf(atomic_load.ptr);
- const ptr_mcv = try self.resolveInst(atomic_load.ptr);
- const ptr_lock = switch (ptr_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
+ const ptr_ty = self.air.typeOf(atomic_load.ptr);
+ const ptr_mcv = try self.resolveInst(atomic_load.ptr);
+ const ptr_lock = switch (ptr_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv =
- if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
- ptr_mcv
- else
- try self.allocRegOrMem(inst, true);
+ const dst_mcv =
+ if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
+ ptr_mcv
+ else
+ try self.allocRegOrMem(inst, true);
- try self.load(dst_mcv, ptr_mcv, ptr_ty);
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ atomic_load.ptr, .none, .none });
+ try self.load(dst_mcv, ptr_mcv, ptr_ty);
+ return self.finishAir(inst, dst_mcv, .{ atomic_load.ptr, .none, .none });
}
fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
@@ -8459,7 +8171,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
try self.genInlineMemset(dst_ptr, src_val, len, .{});
- return self.finishAir(inst, .none, .{ pl_op.operand, extra.lhs, extra.rhs });
+ return self.finishAir(inst, .unreach, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
@@ -8489,128 +8201,129 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
try self.genInlineMemcpy(dst_ptr, src_ptr, len, .{});
- return self.finishAir(inst, .none, .{ pl_op.operand, extra.lhs, extra.rhs });
+ return self.finishAir(inst, .unreach, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
- _ = operand;
- return self.fail("TODO implement airTagName for x86_64", .{});
- };
- return self.finishAir(inst, result, .{ un_op, .none, .none });
+ _ = operand;
+ return self.fail("TODO implement airTagName for x86_64", .{});
+ //return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const err_ty = self.air.typeOf(un_op);
- const err_mcv = try self.resolveInst(un_op);
- const err_reg = try self.copyToTmpRegister(err_ty, err_mcv);
- const err_lock = self.register_manager.lockRegAssumeUnused(err_reg);
- defer self.register_manager.unlockReg(err_lock);
-
- const addr_reg = try self.register_manager.allocReg(null, gp);
- const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
- defer self.register_manager.unlockReg(addr_lock);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
- try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
- .base = .ds,
- .disp = @intCast(i32, got_addr),
- }));
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom_index = try macho_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
- try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
- } else {
- return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)});
- }
-
- const start_reg = try self.register_manager.allocReg(null, gp);
- const start_lock = self.register_manager.lockRegAssumeUnused(start_reg);
- defer self.register_manager.unlockReg(start_lock);
- const end_reg = try self.register_manager.allocReg(null, gp);
- const end_lock = self.register_manager.lockRegAssumeUnused(end_reg);
- defer self.register_manager.unlockReg(end_lock);
+ const err_ty = self.air.typeOf(un_op);
+ const err_mcv = try self.resolveInst(un_op);
+ const err_reg = try self.copyToTmpRegister(err_ty, err_mcv);
+ const err_lock = self.register_manager.lockRegAssumeUnused(err_reg);
+ defer self.register_manager.unlockReg(err_lock);
- try self.truncateRegister(err_ty, err_reg.to32());
+ const addr_reg = try self.register_manager.allocReg(null, gp);
+ const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
+ defer self.register_manager.unlockReg(addr_lock);
- try self.asmRegisterMemory(.mov, start_reg.to32(), Memory.sib(.dword, .{
- .base = addr_reg.to64(),
- .scale_index = .{ .scale = 4, .index = err_reg.to64() },
- .disp = 4,
- }));
- try self.asmRegisterMemory(.mov, end_reg.to32(), Memory.sib(.dword, .{
- .base = addr_reg.to64(),
- .scale_index = .{ .scale = 4, .index = err_reg.to64() },
- .disp = 8,
- }));
- try self.asmRegisterRegister(.sub, end_reg.to32(), start_reg.to32());
- try self.asmRegisterMemory(.lea, start_reg.to64(), Memory.sib(.byte, .{
- .base = addr_reg.to64(),
- .scale_index = .{ .scale = 1, .index = start_reg.to64() },
- .disp = 0,
- }));
- try self.asmRegisterMemory(.lea, end_reg.to32(), Memory.sib(.byte, .{
- .base = end_reg.to64(),
- .disp = -1,
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
+ try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
+ .base = .ds,
+ .disp = @intCast(i32, got_addr),
}));
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
+ try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index = try macho_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
+ try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
+ } else {
+ return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)});
+ }
- const dst_mcv = try self.allocRegOrMem(inst, false);
- try self.asmMemoryRegister(.mov, Memory.sib(.qword, .{
- .base = .rbp,
- .disp = 0 - dst_mcv.stack_offset,
- }), start_reg.to64());
- try self.asmMemoryRegister(.mov, Memory.sib(.qword, .{
- .base = .rbp,
- .disp = 8 - dst_mcv.stack_offset,
- }), end_reg.to64());
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ un_op, .none, .none });
+ const start_reg = try self.register_manager.allocReg(null, gp);
+ const start_lock = self.register_manager.lockRegAssumeUnused(start_reg);
+ defer self.register_manager.unlockReg(start_lock);
+
+ const end_reg = try self.register_manager.allocReg(null, gp);
+ const end_lock = self.register_manager.lockRegAssumeUnused(end_reg);
+ defer self.register_manager.unlockReg(end_lock);
+
+ try self.truncateRegister(err_ty, err_reg.to32());
+
+ try self.asmRegisterMemory(.mov, start_reg.to32(), Memory.sib(.dword, .{
+ .base = addr_reg.to64(),
+ .scale_index = .{ .scale = 4, .index = err_reg.to64() },
+ .disp = 4,
+ }));
+ try self.asmRegisterMemory(.mov, end_reg.to32(), Memory.sib(.dword, .{
+ .base = addr_reg.to64(),
+ .scale_index = .{ .scale = 4, .index = err_reg.to64() },
+ .disp = 8,
+ }));
+ try self.asmRegisterRegister(.sub, end_reg.to32(), start_reg.to32());
+ try self.asmRegisterMemory(.lea, start_reg.to64(), Memory.sib(.byte, .{
+ .base = addr_reg.to64(),
+ .scale_index = .{ .scale = 1, .index = start_reg.to64() },
+ .disp = 0,
+ }));
+ try self.asmRegisterMemory(.lea, end_reg.to32(), Memory.sib(.byte, .{
+ .base = end_reg.to64(),
+ .disp = -1,
+ }));
+
+ const dst_mcv = try self.allocRegOrMem(inst, false);
+ try self.asmMemoryRegister(.mov, Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = 0 - dst_mcv.stack_offset,
+ }), start_reg.to64());
+ try self.asmMemoryRegister(.mov, Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = 8 - dst_mcv.stack_offset,
+ }), end_reg.to64());
+
+ return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
}
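// A hedged reading of the lowering above (layout inferred from the scale-4 indexing and
// the +4/+8 displacements; illustrative only, not part of the patch): the lazy `anyerror`
// symbol is treated as a table of u32 offsets indexed by error value, from which the
// `[]const u8` name slice is formed:
//
//     const offsets = @ptrCast([*]const u32, table_base); // table_base: [*]const u8
//     const start = offsets[err + 1];       // mov, disp 4
//     const len = offsets[err + 2] - start; // mov + sub, disp 8
//     const name = table_base[start..][0 .. len - 1]; // lea -1 drops a sentinel byte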
fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSplat for x86_64", .{});
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ _ = ty_op;
+ return self.fail("TODO implement airSplat for x86_64", .{});
+ //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSelect for x86_64", .{});
- return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
+ _ = extra;
+ return self.fail("TODO implement airSelect for x86_64", .{});
+ //return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for x86_64", .{});
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ _ = ty_op;
+ return self.fail("TODO implement airShuffle for x86_64", .{});
+ //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
const reduce = self.air.instructions.items(.data)[inst].reduce;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airReduce for x86_64", .{});
- return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
+ _ = reduce;
+ return self.fail("TODO implement airReduce for x86_64", .{});
+ //return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
@@ -8620,8 +8333,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const abi_size = @intCast(u32, result_ty.abiSize(self.target.*));
const abi_align = result_ty.abiAlignment(self.target.*);
- const result: MCValue = res: {
- if (self.liveness.isUnused(inst)) break :res MCValue.dead;
+ const result: MCValue = result: {
switch (result_ty.zigTypeTag()) {
.Struct => {
const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
@@ -8712,7 +8424,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
};
try self.genSetStack(elem_ty, stack_offset - elem_off, mat_elem_mcv, .{});
}
- break :res .{ .stack_offset = stack_offset };
+ break :result .{ .stack_offset = stack_offset };
},
.Array => {
const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
@@ -8728,7 +8440,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const elem_off = @intCast(i32, elem_size * elem_i);
try self.genSetStack(elem_ty, stack_offset - elem_off, mat_elem_mcv, .{});
}
- break :res MCValue{ .stack_offset = stack_offset };
+ break :result MCValue{ .stack_offset = stack_offset };
},
.Vector => return self.fail("TODO implement aggregate_init for vectors", .{}),
else => unreachable,
@@ -8740,82 +8452,70 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
std.mem.copy(Air.Inst.Ref, &buf, elements);
return self.finishAir(inst, result, buf);
}
- var bt = try self.iterateBigTomb(inst, elements.len);
- for (elements) |elem| {
- bt.feed(elem);
- }
- return bt.finishAir(result);
+ var bt = self.liveness.iterateBigTomb(inst);
+ for (elements) |elem| self.feed(&bt, elem);
+ return self.finishAirResult(inst, result);
}
fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
- const result: MCValue = res: {
- if (self.liveness.isUnused(inst)) break :res MCValue.dead;
- return self.fail("TODO implement airAggregateInit for x86_64", .{});
- };
- return self.finishAir(inst, result, .{ extra.init, .none, .none });
+ _ = extra;
+ return self.fail("TODO implement airAggregateInit for x86_64", .{});
+ //return self.finishAir(inst, result, .{ extra.init, .none, .none });
}
fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
const prefetch = self.air.instructions.items(.data)[inst].prefetch;
- return self.finishAir(inst, MCValue.dead, .{ prefetch.ptr, .none, .none });
+ return self.finishAir(inst, .unreach, .{ prefetch.ptr, .none, .none });
}
fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
- return self.fail("TODO implement airMulAdd for x86_64", .{});
- };
- return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
+ _ = extra;
+ return self.fail("TODO implement airMulAdd for x86_64", .{});
+ //return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
}
-fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
- // First section of indexes correspond to a set number of constant values.
- const ref_int = @enumToInt(inst);
- if (ref_int < Air.Inst.Ref.typed_value_map.len) {
- const tv = Air.Inst.Ref.typed_value_map[ref_int];
- if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
- return .none;
- }
- return self.genTypedValue(tv);
- }
+fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
+ const ty = self.air.typeOf(ref);
// If the type has no codegen bits, no need to store it.
- const inst_ty = self.air.typeOf(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
- return .none;
-
- const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
- switch (self.air.instructions.items(.tag)[inst_index]) {
- .constant => {
- // Constants have static lifetimes, so they are always memoized in the outer most table.
- const branch = &self.branch_stack.items[0];
- const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
- if (!gop.found_existing) {
- const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
- gop.value_ptr.* = try self.genTypedValue(.{
- .ty = inst_ty,
- .val = self.air.values[ty_pl.payload],
- });
- }
- return gop.value_ptr.*;
- },
- .const_ty => unreachable,
- else => return self.getResolvedInstValue(inst_index).?.*,
+ if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isError()) return .none;
+
+ if (Air.refToIndex(ref)) |inst| {
+ const mcv = switch (self.air.instructions.items(.tag)[inst]) {
+ .constant => tracking: {
+ const gop = try self.const_tracking.getOrPut(self.gpa, inst);
+ if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{
+ .ty = ty,
+ .val = self.air.value(ref).?,
+ }));
+ break :tracking gop.value_ptr;
+ },
+ .const_ty => unreachable,
+ else => self.inst_tracking.getPtr(inst).?,
+ }.short;
+ switch (mcv) {
+ .none, .unreach, .dead => unreachable,
+ else => return mcv,
+ }
}
+
+ return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref).? });
}
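// Sketch of the memoization behavior above (illustrative; `ref` is assumed to name a
// `constant` instruction): the first resolve populates `const_tracking`, and later
// resolves hit the existing entry, so a constant keeps one stable machine value for
// the whole function, matching the old "memoized in the outermost table" behavior:
//
//     const first = try self.resolveInst(ref); // getOrPut miss: genTypedValue + insert
//     const again = try self.resolveInst(ref); // getOrPut hit: cached tracking's `.short`
//     assert(std.meta.eql(first, again));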
-fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) ?*MCValue {
- // Treat each stack item as a "layer" on top of the previous one.
- var i: usize = self.branch_stack.items.len;
- while (true) {
- i -= 1;
- if (self.branch_stack.items[i].inst_table.getPtr(inst)) |mcv| {
- return if (mcv.* != .dead) mcv else null;
- }
- }
+fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking {
+ const tracking = switch (self.air.instructions.items(.tag)[inst]) {
+ .constant => &self.const_tracking,
+ .const_ty => unreachable,
+ else => &self.inst_tracking,
+ }.getPtr(inst).?;
+ return switch (tracking.short) {
+ .none, .unreach, .dead => unreachable,
+ else => tracking,
+ };
}
/// If the MCValue is an immediate, and it does not fit within this type,
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index f659827f09..d2d594c901 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -78,7 +78,6 @@ const LoopDepth = u16;
const Local = struct {
cty_idx: CType.Index,
alignas: CType.AlignAs,
- is_in_clone: bool,
pub fn getType(local: Local) LocalType {
return .{ .cty_idx = local.cty_idx, .alignas = local.alignas };
@@ -275,16 +274,13 @@ pub const Function = struct {
/// All the locals, to be emitted at the top of the function.
locals: std.ArrayListUnmanaged(Local) = .{},
/// Which locals are available for reuse, based on Type.
- /// Only locals in the last stack entry are available for reuse,
- /// other entries will become available on loop exit.
free_locals_map: LocalsMap = .{},
- is_in_clone: bool = false,
/// Locals which will not be freed by Liveness. This is used after a
/// Function body is lowered in order to make `free_locals_map` have
/// 100% of the locals within so that it can be used to render the block
/// of variable declarations at the top of a function, sorted descending
/// by type alignment.
- /// The value is whether the alloc is static or not.
+ /// The value is whether the alloc needs to be emitted in the header.
allocs: std.AutoArrayHashMapUnmanaged(LocalIndex, bool) = .{},
/// Needed for memory used by the keys of free_locals_map entries.
arena: std.heap.ArenaAllocator,
@@ -302,7 +298,7 @@ pub const Function = struct {
const alignment = 0;
const decl_c_value = try f.allocLocalValue(ty, alignment);
const gpa = f.object.dg.gpa;
- try f.allocs.put(gpa, decl_c_value.new_local, true);
+ try f.allocs.put(gpa, decl_c_value.new_local, false);
try writer.writeAll("static ");
try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, Const, alignment, .complete);
try writer.writeAll(" = ");
@@ -323,14 +319,15 @@ pub const Function = struct {
};
}
- /// Skips the reuse logic.
+ /// Skips the reuse logic. This function should be used for any persistent allocation, i.e.
+ /// those which go into `allocs`. This function does not add the resulting local into `allocs`;
+ /// that responsibility lies with the caller.
fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue {
const gpa = f.object.dg.gpa;
const target = f.object.dg.module.getTarget();
try f.locals.append(gpa, .{
.cty_idx = try f.typeToIndex(ty, .complete),
.alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)),
- .is_in_clone = f.is_in_clone,
});
return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) };
}
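// Hedged usage sketch of the protocol above (it mirrors `airAlloc` later in this diff):
// the caller both allocates the persistent local and registers it in `allocs`, with the
// bool marking whether the local must be emitted in the function header:
//
//     const local = try f.allocLocalValue(elem_type, alignment); // bypasses reuse
//     try f.allocs.put(gpa, local.new_local, true); // true: emit in header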
@@ -341,7 +338,8 @@ pub const Function = struct {
return result;
}
- /// Only allocates the local; does not print anything.
+ /// Only allocates the local; does not print anything. Will attempt to reuse locals, so should
+ /// not be used for persistent locals (i.e. those in `allocs`).
fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue {
const target = f.object.dg.module.getTarget();
if (f.free_locals_map.getPtr(.{
@@ -2586,7 +2584,7 @@ pub fn genFunc(f: *Function) !void {
f.free_locals_map.clearRetainingCapacity();
const main_body = f.air.getMainBody();
- try genBody(f, main_body);
+ try genBodyResolveState(f, undefined, &.{}, main_body, false);
try o.indent_writer.insertNewline();
@@ -2597,8 +2595,8 @@ pub fn genFunc(f: *Function) !void {
// alignment, descending.
const free_locals = &f.free_locals_map;
assert(f.value_map.count() == 0); // there must not be any unfreed locals
- for (f.allocs.keys(), f.allocs.values()) |local_index, value| {
- if (value) continue; // static
+ for (f.allocs.keys(), f.allocs.values()) |local_index, should_emit| {
+ if (!should_emit) continue;
const local = f.locals.items[local_index];
log.debug("inserting local {d} into free_locals", .{local_index});
const gop = try free_locals.getOrPut(gpa, local.getType());
@@ -2715,6 +2713,10 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
}
}
+/// Generate code for an entire body which ends with a `noreturn` instruction. The states of
+/// `value_map` and `free_locals_map` are undefined after the generation, and new locals may not
+/// have been added to `free_locals_map`. For a version of this function that restores this state,
+/// see `genBodyResolveState`.
fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
const writer = f.object.writer();
if (body.len == 0) {
@@ -2728,10 +2730,69 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
}
}
+/// Generate code for an entire body which ends with a `noreturn` instruction. The states of
+/// `value_map` and `free_locals_map` are restored to their original values, and any non-allocated
+/// locals introduced within the body are correctly added to `free_locals_map`. Operands in
+/// `leading_deaths` have their deaths processed before the body is generated.
+/// A scope is introduced (using braces) only if `inner` is `false`.
+/// If `leading_deaths` is empty, `inst` may be `undefined`.
+fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []const Air.Inst.Index, body: []const Air.Inst.Index, inner: bool) error{ AnalysisFail, OutOfMemory }!void {
+ if (body.len == 0) {
+ // Don't go to the expense of cloning everything!
+ if (!inner) try f.object.writer().writeAll("{}");
+ return;
+ }
+
+ // TODO: we can probably avoid the copies in some other common cases too.
+
+ const gpa = f.object.dg.gpa;
+
+ // Save the original value_map and free_locals_map so that we can restore them after the body.
+ var old_value_map = try f.value_map.clone();
+ defer old_value_map.deinit();
+ var old_free_locals = try cloneFreeLocalsMap(gpa, &f.free_locals_map);
+ defer deinitFreeLocalsMap(gpa, &old_free_locals);
+
+ // Remember how many locals there were before entering the body so that we can free any that
+ // were newly introduced. Any new locals must necessarily be logically free once the
+ // body is complete.
+ const pre_locals_len = @intCast(LocalIndex, f.locals.items.len);
+
+ for (leading_deaths) |death| {
+ try die(f, inst, Air.indexToRef(death));
+ }
+
+ if (inner) {
+ try genBodyInner(f, body);
+ } else {
+ try genBody(f, body);
+ }
+
+ f.value_map.deinit();
+ f.value_map = old_value_map.move();
+ deinitFreeLocalsMap(gpa, &f.free_locals_map);
+ f.free_locals_map = old_free_locals.move();
+
+ // Now, use the length we stored earlier to detect any locals the body generated, and
+ // free them, unless they were used to store allocs.
+
+ for (pre_locals_len..f.locals.items.len) |local_i| {
+ const local_index = @intCast(LocalIndex, local_i);
+ if (f.allocs.contains(local_index)) {
+ continue;
+ }
+ try freeLocal(f, inst, local_index, 0);
+ }
+}
+
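// A minimal sketch of the contract documented above (names from this patch; assumes a
// caller with `f`, `inst`, and liveness data in scope): state visible before the call is
// identical after it, while the body's own non-alloc locals land in `free_locals_map`:
//
//     const n_values = f.value_map.count();
//     try genBodyResolveState(f, inst, liveness_condbr.then_deaths, then_body, false);
//     assert(f.value_map.count() == n_values); // value_map was cloned and restored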
fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
const air_tags = f.air.instructions.items(.tag);
for (body) |inst| {
+ if (f.liveness.isUnused(inst) and !f.air.mustLower(inst)) {
+ continue;
+ }
+
const result_value = switch (air_tags[inst]) {
// zig fmt: off
.constant => unreachable, // excluded from function bodies
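// A minimal sketch (not from the patch) of the skip rule introduced above, which the
// LLVM backend later in this diff applies identically: an instruction is skipped only
// when Liveness reports its result unused and `mustLower` does not force lowering.
// Assuming a backend with `air` and `liveness` fields:
//
//     fn canElide(air: Air, liveness: Liveness, inst: Air.Inst.Index) bool {
//         return liveness.isUnused(inst) and !air.mustLower(inst);
//     }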
@@ -3009,11 +3070,6 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: []const u8) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -3032,10 +3088,7 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.air.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = f.air.typeOf(bin_op.lhs);
- if ((!ptr_ty.isVolatilePtr() and f.liveness.isUnused(inst)) or
- !inst_ty.hasRuntimeBitsIgnoreComptime())
- {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3074,11 +3127,6 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
const ptr_ty = f.air.typeOf(bin_op.lhs);
const child_ty = ptr_ty.childType();
@@ -3116,10 +3164,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.air.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const slice_ty = f.air.typeOf(bin_op.lhs);
- if ((!slice_ty.isVolatilePtr() and f.liveness.isUnused(inst)) or
- !inst_ty.hasRuntimeBitsIgnoreComptime())
- {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3158,11 +3203,6 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
-
const slice_ty = f.air.typeOf(bin_op.lhs);
const child_ty = slice_ty.elemType2();
const slice = try f.resolveInst(bin_op.lhs);
@@ -3188,7 +3228,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const inst_ty = f.air.typeOfIndex(inst);
- if (f.liveness.isUnused(inst) or !inst_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3224,40 +3264,34 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
-
const inst_ty = f.air.typeOfIndex(inst);
const elem_type = inst_ty.elemType();
if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty };
const target = f.object.dg.module.getTarget();
- const local = try f.allocAlignedLocal(
+ const local = try f.allocLocalValue(
elem_type,
- CQualifiers.init(.{ .@"const" = inst_ty.isConstPtr() }),
inst_ty.ptrAlignment(target),
);
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
const gpa = f.object.dg.module.gpa;
- try f.allocs.put(gpa, local.new_local, false);
+ try f.allocs.put(gpa, local.new_local, true);
return .{ .local_ref = local.new_local };
}
fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
-
const inst_ty = f.air.typeOfIndex(inst);
const elem_ty = inst_ty.elemType();
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty };
const target = f.object.dg.module.getTarget();
- const local = try f.allocAlignedLocal(
+ const local = try f.allocLocalValue(
elem_ty,
- CQualifiers.init(.{ .@"const" = inst_ty.isConstPtr() }),
inst_ty.ptrAlignment(target),
);
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
const gpa = f.object.dg.module.gpa;
- try f.allocs.put(gpa, local.new_local, false);
+ try f.allocs.put(gpa, local.new_local, true);
return .{ .local_ref = local.new_local };
}
@@ -3293,9 +3327,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_info = ptr_scalar_ty.ptrInfo().data;
const src_ty = ptr_info.pointee_type;
- if (!src_ty.hasRuntimeBitsIgnoreComptime() or
- (!ptr_info.@"volatile" and f.liveness.isUnused(inst)))
- {
+ if (!src_ty.hasRuntimeBitsIgnoreComptime()) {
try reap(f, inst, &.{ty_op.operand});
return .none;
}
@@ -3442,11 +3474,6 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -3470,10 +3497,6 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -3569,10 +3592,6 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{un_op});
- return .none;
- }
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
const writer = f.object.writer();
@@ -3746,11 +3765,6 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
-
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
@@ -3790,11 +3804,6 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
const scalar_ty = operand_ty.scalarType();
if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits);
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const op = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -3829,11 +3838,6 @@ fn airBinOp(
if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info);
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
-
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
@@ -3865,11 +3869,6 @@ fn airCmpOp(
data: anytype,
operator: std.math.CompareOperator,
) !CValue {
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ data.lhs, data.rhs });
- return .none;
- }
-
const operand_ty = f.air.typeOf(data.lhs);
const scalar_ty = operand_ty.scalarType();
@@ -3918,11 +3917,6 @@ fn airEquality(
) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
-
const operand_ty = f.air.typeOf(bin_op.lhs);
const target = f.object.dg.module.getTarget();
const operand_bits = operand_ty.bitSize(target);
@@ -3987,11 +3981,6 @@ fn airEquality(
fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{un_op});
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
@@ -4008,10 +3997,6 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue {
fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
@@ -4059,11 +4044,6 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType();
@@ -4107,11 +4087,6 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
-
const ptr = try f.resolveInst(bin_op.lhs);
const len = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
@@ -4316,6 +4291,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Block, ty_pl.payload);
const body = f.air.extra[extra.end..][0..extra.data.body_len];
+ const liveness_block = f.liveness.getBlock(inst);
const block_id: usize = f.next_block_index;
f.next_block_index += 1;
@@ -4332,7 +4308,15 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
.result = result,
});
- try genBodyInner(f, body);
+ try genBodyResolveState(f, inst, &.{}, body, true);
+
+ assert(f.blocks.remove(inst));
+
+ // The body might have killed some values that were live before it; process those deaths.
+ for (liveness_block.deaths) |death| {
+ try die(f, inst, Air.indexToRef(death));
+ }
+
try f.object.indent_writer.insertNewline();
// label might be unused, add a dummy goto
// label must be followed by an expression, add an empty one.
@@ -4366,6 +4350,7 @@ fn lowerTry(
) !CValue {
const err_union = try f.resolveInst(operand);
const result_ty = f.air.typeOfIndex(inst);
+ const liveness_condbr = f.liveness.getCondBr(inst);
const writer = f.object.writer();
const payload_ty = err_union_ty.errorUnionPayload();
const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
@@ -4389,10 +4374,15 @@ fn lowerTry(
}
try writer.writeByte(')');
- try genBody(f, body);
+ try genBodyResolveState(f, inst, liveness_condbr.else_deaths, body, false);
try f.object.indent_writer.insertNewline();
}
+ // Now we have the "then branch" (in terms of the liveness data); process any deaths.
+ for (liveness_condbr.then_deaths) |death| {
+ try die(f, inst, Air.indexToRef(death));
+ }
+
if (!payload_has_bits) {
if (!operand_is_ptr) {
return .none;
@@ -4466,10 +4456,6 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue {
fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const dest_ty = f.air.typeOfIndex(inst);
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -4593,7 +4579,6 @@ fn airBreakpoint(writer: anytype) !CValue {
}
fn airRetAddr(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
const writer = f.object.writer();
const local = try f.allocLocal(inst, Type.usize);
try f.writeCValue(writer, local, .Other);
@@ -4604,7 +4589,6 @@ fn airRetAddr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFrameAddress(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
const writer = f.object.writer();
const local = try f.allocLocal(inst, Type.usize);
try f.writeCValue(writer, local, .Other);
@@ -4637,17 +4621,12 @@ fn airLoop(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const loop = f.air.extraData(Air.Block, ty_pl.payload);
const body = f.air.extra[loop.end..][0..loop.data.body_len];
- const liveness_loop = f.liveness.getLoop(inst);
const writer = f.object.writer();
try writer.writeAll("for (;;) ");
- try genBody(f, body);
+ try genBody(f, body); // no need to restore state, we're noreturn
try writer.writeByte('\n');
- for (liveness_loop.deaths) |operand| {
- try die(f, inst, Air.indexToRef(operand));
- }
-
return .none;
}
@@ -4661,61 +4640,24 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
const liveness_condbr = f.liveness.getCondBr(inst);
const writer = f.object.writer();
- // Keep using the original for the then branch; use a clone of the value
- // map for the else branch.
- const gpa = f.object.dg.gpa;
- var cloned_map = try f.value_map.clone();
- defer cloned_map.deinit();
- var cloned_frees = try cloneFreeLocalsMap(gpa, &f.free_locals_map);
- defer deinitFreeLocalsMap(gpa, &cloned_frees);
-
- // Remember how many locals there were before entering the then branch so
- // that we can notice and use them in the else branch. Any new locals must
- // necessarily be free already after the then branch is complete.
- const pre_locals_len = @intCast(LocalIndex, f.locals.items.len);
- // Remember how many allocs there were before entering the then branch so
- // that we can notice and make sure not to use them in the else branch.
- // Any new allocs must be removed from the free list.
- const pre_allocs_len = @intCast(LocalIndex, f.allocs.count());
- const was_in_clone = f.is_in_clone;
- f.is_in_clone = true;
-
- for (liveness_condbr.then_deaths) |operand| {
- try die(f, inst, Air.indexToRef(operand));
- }
-
try writer.writeAll("if (");
try f.writeCValue(writer, cond, .Other);
try writer.writeAll(") ");
- try genBody(f, then_body);
- // TODO: If body ends in goto, elide the else block?
- const needs_else = then_body.len <= 0 or f.air.instructions.items(.tag)[then_body[then_body.len - 1]] != .br;
- if (needs_else) {
- try writer.writeAll(" else ");
- } else {
- try writer.writeByte('\n');
- }
+ try genBodyResolveState(f, inst, liveness_condbr.then_deaths, then_body, false);
- f.value_map.deinit();
- f.value_map = cloned_map.move();
- const free_locals = &f.free_locals_map;
- deinitFreeLocalsMap(gpa, free_locals);
- free_locals.* = cloned_frees.move();
- f.is_in_clone = was_in_clone;
- for (liveness_condbr.else_deaths) |operand| {
- try die(f, inst, Air.indexToRef(operand));
- }
-
- try noticeBranchFrees(f, pre_locals_len, pre_allocs_len, inst);
+ // We don't need `genBodyResolveState` for the else block: this instruction is
+ // noreturn, so it must terminate a body, and therefore we don't need to leave
+ // `value_map` or `free_locals_map` well defined (our parent is responsible for that).
- if (needs_else) {
- try genBody(f, else_body);
- } else {
- try genBodyInner(f, else_body);
+ for (liveness_condbr.else_deaths) |death| {
+ try die(f, inst, Air.indexToRef(death));
}
- try f.object.indent_writer.insertNewline();
+ // We never actually need an `else` block: both branches are noreturn, so the then
+ // body always transfers control away (for instance by `br`-ing to a block label)
+ // and can never fall through to the code after the `if`.
+
+ try genBodyInner(f, else_body);
return .none;
}
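// Illustrative shape of the C emitted after this rework (assuming the then body ends
// in a `br`, which lowers to a goto; names hypothetical): since the then branch never
// falls through, the else body follows the `if` directly, with no `else` keyword:
//
//     if (cond) {
//         /* ... then body ... */
//         goto zig_block_0; /* then body is noreturn */
//     }
//     /* else body lowered inline here */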
@@ -4746,9 +4688,8 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
const liveness = try f.liveness.getSwitchBr(gpa, inst, switch_br.data.cases_len + 1);
defer gpa.free(liveness.deaths);
- // On the final iteration we do not clone the map. This ensures that
- // lowering proceeds after the switch_br taking into account the
- // mutations to the liveness information.
+ // On the final iteration we do not need to restore any state. Like the `else`
+ // branch of a `cond_br`, the final body is noreturn, so our parent has to restore
+ // state for this entire body anyway.
const last_case_i = switch_br.data.cases_len - @boolToInt(switch_br.data.else_body_len == 0);
var extra_index: usize = switch_br.end;
@@ -4772,56 +4713,23 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(' ');
if (case_i != last_case_i) {
- const old_value_map = f.value_map;
- f.value_map = try old_value_map.clone();
- var free_locals = &f.free_locals_map;
- const old_free_locals = free_locals.*;
- free_locals.* = try cloneFreeLocalsMap(gpa, free_locals);
-
- // Remember how many locals there were before entering each branch so that
- // we can notice and use them in subsequent branches. Any new locals must
- // necessarily be free already after the previous branch is complete.
- const pre_locals_len = @intCast(LocalIndex, f.locals.items.len);
- // Remember how many allocs there were before entering each branch so that
- // we can notice and make sure not to use them in subsequent branches.
- // Any new allocs must be removed from the free list.
- const pre_allocs_len = @intCast(LocalIndex, f.allocs.count());
- const was_in_clone = f.is_in_clone;
- f.is_in_clone = true;
-
- {
- defer {
- f.is_in_clone = was_in_clone;
- f.value_map.deinit();
- deinitFreeLocalsMap(gpa, free_locals);
- f.value_map = old_value_map;
- free_locals.* = old_free_locals;
- }
-
- for (liveness.deaths[case_i]) |operand| {
- try die(f, inst, Air.indexToRef(operand));
- }
-
- try genBody(f, case_body);
- }
-
- try noticeBranchFrees(f, pre_locals_len, pre_allocs_len, inst);
+ try genBodyResolveState(f, inst, liveness.deaths[case_i], case_body, false);
} else {
- for (liveness.deaths[case_i]) |operand| {
- try die(f, inst, Air.indexToRef(operand));
+ for (liveness.deaths[case_i]) |death| {
+ try die(f, inst, Air.indexToRef(death));
}
try genBody(f, case_body);
}
// The case body must be noreturn so we don't need to insert a break.
-
}
const else_body = f.air.extra[extra_index..][0..switch_br.data.else_body_len];
try f.object.indent_writer.insertNewline();
if (else_body.len > 0) {
- for (liveness.deaths[liveness.deaths.len - 1]) |operand| {
- try die(f, inst, Air.indexToRef(operand));
+ // Note that this else body is then the final body (i.e. no case above matched `last_case_i`)
+ for (liveness.deaths[liveness.deaths.len - 1]) |death| {
+ try die(f, inst, Air.indexToRef(death));
}
try writer.writeAll("default: ");
try genBody(f, else_body);
@@ -4848,6 +4756,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const extra = f.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
const clobbers_len = @truncate(u31, extra.data.flags);
+ const gpa = f.object.dg.gpa;
var extra_i: usize = extra.end;
const outputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
@@ -4855,8 +4764,6 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
extra_i += inputs.len;
const result = result: {
- if (!is_volatile and f.liveness.isUnused(inst)) break :result .none;
-
const writer = f.object.writer();
const inst_ty = f.air.typeOfIndex(inst);
const local = if (inst_ty.hasRuntimeBitsIgnoreComptime()) local: {
@@ -4892,6 +4799,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("register ");
const alignment = 0;
const local_value = try f.allocLocalValue(output_ty, alignment);
+ try f.allocs.put(gpa, local_value.new_local, false);
try f.object.dg.renderTypeAndName(writer, output_ty, local_value, .{}, alignment, .complete);
try writer.writeAll(" __asm(\"");
try writer.writeAll(constraint["={".len .. constraint.len - "}".len]);
@@ -4924,6 +4832,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
if (is_reg) try writer.writeAll("register ");
const alignment = 0;
const local_value = try f.allocLocalValue(input_ty, alignment);
+ try f.allocs.put(gpa, local_value.new_local, false);
try f.object.dg.renderTypeAndName(writer, input_ty, local_value, Const, alignment, .complete);
if (is_reg) {
try writer.writeAll(" __asm(\"");
@@ -5106,11 +5015,6 @@ fn airIsNull(
) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{un_op});
- return .none;
- }
-
const writer = f.object.writer();
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
@@ -5156,11 +5060,6 @@ fn airIsNull(
fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const opt_ty = f.air.typeOf(ty_op.operand);
@@ -5208,11 +5107,6 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const writer = f.object.writer();
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5342,11 +5236,6 @@ fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{extra.struct_operand});
- return .none;
- }
-
const container_ptr_val = try f.resolveInst(extra.struct_operand);
try reap(f, inst, &.{extra.struct_operand});
const container_ptr_ty = f.air.typeOf(extra.struct_operand);
@@ -5356,11 +5245,6 @@ fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue {
fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const container_ptr_val = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const container_ptr_ty = f.air.typeOf(ty_op.operand);
@@ -5371,11 +5255,6 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{extra.field_ptr});
- return .none;
- }
-
const target = f.object.dg.module.getTarget();
const container_ptr_ty = f.air.typeOfIndex(inst);
const container_ty = container_ptr_ty.childType();
@@ -5494,11 +5373,6 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{extra.struct_operand});
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
try reap(f, inst, &.{extra.struct_operand});
@@ -5644,11 +5518,6 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
const operand_ty = f.air.typeOf(ty_op.operand);
@@ -5681,11 +5550,6 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5723,11 +5587,6 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
const payload = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5769,10 +5628,6 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
const writer = f.object.writer();
const operand = try f.resolveInst(ty_op.operand);
@@ -5836,7 +5691,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airErrReturnTrace(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
+ _ = inst;
return f.fail("TODO: C backend: implement airErrReturnTrace", .{});
}
@@ -5852,10 +5707,6 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue {
fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
const inst_ty = f.air.typeOfIndex(inst);
const payload_ty = inst_ty.errorUnionPayload();
@@ -5890,11 +5741,6 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{un_op});
- return .none;
- }
-
const writer = f.object.writer();
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
@@ -5928,11 +5774,6 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
@@ -5966,11 +5807,6 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -6014,11 +5850,6 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{un_op});
- return .none;
- }
-
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
const inst_ty = f.air.typeOfIndex(inst);
@@ -6042,11 +5873,6 @@ fn airUnBuiltinCall(
) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
@@ -6090,11 +5916,6 @@ fn airBinBuiltinCall(
) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
-
const operand_ty = f.air.typeOf(bin_op.lhs);
const operand_cty = try f.typeToCType(operand_ty, .complete);
const is_big = operand_cty.tag() == .array;
@@ -6147,11 +5968,6 @@ fn airCmpBuiltinCall(
operation: enum { cmp, operator },
info: BuiltinInfo,
) !CValue {
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ data.lhs, data.rhs });
- return .none;
- }
-
const lhs = try f.resolveInst(data.lhs);
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
@@ -6322,9 +6138,6 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr = try f.resolveInst(atomic_load.ptr);
try reap(f, inst, &.{atomic_load.ptr});
const ptr_ty = f.air.typeOf(atomic_load.ptr);
- if (!ptr_ty.isVolatilePtr() and f.liveness.isUnused(inst)) {
- return .none;
- }
const inst_ty = f.air.typeOfIndex(inst);
const writer = f.object.writer();
@@ -6468,11 +6281,6 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -6496,11 +6304,6 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{un_op});
- return .none;
- }
-
const inst_ty = f.air.typeOfIndex(inst);
const enum_ty = f.air.typeOf(un_op);
const operand = try f.resolveInst(un_op);
@@ -6521,11 +6324,6 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{un_op});
- return .none;
- }
-
const writer = f.object.writer();
const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(un_op);
@@ -6542,11 +6340,6 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
-
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -6578,11 +6371,6 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
- return .none;
- }
-
const pred = try f.resolveInst(pl_op.operand);
const lhs = try f.resolveInst(extra.lhs);
const rhs = try f.resolveInst(extra.rhs);
@@ -6614,11 +6402,6 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ extra.a, extra.b });
- return .none;
- }
-
const mask = f.air.values[extra.mask];
const lhs = try f.resolveInst(extra.a);
const rhs = try f.resolveInst(extra.b);
@@ -6660,11 +6443,6 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const reduce = f.air.instructions.items(.data)[inst].reduce;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{reduce.operand});
- return .none;
- }
-
const target = f.object.dg.module.getTarget();
const scalar_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(reduce.operand);
@@ -6836,8 +6614,6 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
}
- if (f.liveness.isUnused(inst)) return .none;
-
const target = f.object.dg.module.getTarget();
const writer = f.object.writer();
@@ -7004,11 +6780,6 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{extra.init});
- return .none;
- }
-
const union_ty = f.air.typeOfIndex(inst);
const target = f.object.dg.module.getTarget();
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
@@ -7075,8 +6846,6 @@ fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airWasmMemorySize(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
-
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const writer = f.object.writer();
@@ -7109,10 +6878,6 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{un_op});
- return .none;
- }
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
@@ -7138,10 +6903,6 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{un_op});
- return .none;
- }
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
@@ -7169,10 +6930,6 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- return .none;
- }
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
@@ -7205,10 +6962,6 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
- return .none;
- }
const mulend1 = try f.resolveInst(bin_op.lhs);
const mulend2 = try f.resolveInst(bin_op.rhs);
@@ -7241,8 +6994,6 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
-
const inst_ty = f.air.typeOfIndex(inst);
const fn_cty = try f.typeToCType(f.object.dg.decl.?.ty, .complete);
const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len;
@@ -7261,10 +7012,6 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
fn airCVaArg(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
const inst_ty = f.air.typeOfIndex(inst);
const va_list = try f.resolveInst(ty_op.operand);
@@ -7296,10 +7043,6 @@ fn airCVaEnd(f: *Function, inst: Air.Inst.Index) !CValue {
fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- if (f.liveness.isUnused(inst)) {
- try reap(f, inst, &.{ty_op.operand});
- return .none;
- }
const inst_ty = f.air.typeOfIndex(inst);
const va_list = try f.resolveInst(ty_op.operand);
@@ -7863,7 +7606,6 @@ fn freeLocal(f: *Function, inst: Air.Inst.Index, local_index: LocalIndex, ref_in
const gpa = f.object.dg.gpa;
const local = &f.locals.items[local_index];
log.debug("%{d}: freeing t{d} (operand %{d})", .{ inst, local_index, ref_inst });
- if (f.is_in_clone != local.is_in_clone) return;
const gop = try f.free_locals_map.getOrPut(gpa, local.getType());
if (!gop.found_existing) gop.value_ptr.* = .{};
if (std.debug.runtime_safety) {
@@ -7921,35 +7663,3 @@ fn deinitFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) void {
}
map.deinit(gpa);
}
-
-fn noticeBranchFrees(
- f: *Function,
- pre_locals_len: LocalIndex,
- pre_allocs_len: LocalIndex,
- inst: Air.Inst.Index,
-) !void {
- for (f.locals.items[pre_locals_len..], pre_locals_len..) |*local, local_i| {
- const local_index = @intCast(LocalIndex, local_i);
- if (f.allocs.contains(local_index)) {
- if (std.debug.runtime_safety) {
- // new allocs are no longer freeable, so make sure they aren't in the free list
- if (f.free_locals_map.getPtr(local.getType())) |locals_list| {
- assert(!locals_list.contains(local_index));
- }
- }
- continue;
- }
-
- // free cloned locals from other branches at current cloned-ness
- std.debug.assert(local.is_in_clone or !f.is_in_clone);
- local.is_in_clone = f.is_in_clone;
- try freeLocal(f, inst, local_index, 0);
- }
-
- for (f.allocs.keys()[pre_allocs_len..]) |local_i| {
- const local_index = @intCast(LocalIndex, local_i);
- const local = &f.locals.items[local_index];
- // new allocs are no longer freeable, so remove them from the free list
- if (f.free_locals_map.getPtr(local.getType())) |locals_list| _ = locals_list.swapRemove(local_index);
- }
-}
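
Every c.zig hunk above deletes the same three-line prelude: consult Liveness, reap the operands, and return .none without emitting any C code. The replacement is a single check at the top of the dispatch loop, mirroring the genBody change in llvm.zig directly below. A minimal before/after sketch (handler and loop bodies are illustrative, not quoted from the source):

// Before: every handler guarded itself.
fn airExampleOp(f: *Function, inst: Air.Inst.Index) !CValue {
    const un_op = f.air.instructions.items(.data)[inst].un_op;
    if (f.liveness.isUnused(inst)) {
        try reap(f, inst, &.{un_op}); // operands die here; emit nothing
        return .none;
    }
    // ... lower the instruction to C ...
    return .none;
}

// After: one centralized skip, assuming c.zig's dispatch loop gains the
// same check as llvm.zig's genBody below; Air.mustLower keeps instructions
// with side effects from being skipped.
for (body) |inst| {
    if (f.liveness.isUnused(inst) and !f.air.mustLower(inst)) continue;
    // ... dispatch to the airXyz handler ...
}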
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index a081448155..897a582952 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -4523,6 +4523,10 @@ pub const FuncGen = struct {
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
const air_tags = self.air.instructions.items(.tag);
for (body, 0..) |inst, i| {
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ continue;
+ }
+
const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airAdd(inst, false),
@@ -5166,8 +5170,6 @@ pub const FuncGen = struct {
}
fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const list = try self.resolveInst(ty_op.operand);
const arg_ty = self.air.getRefType(ty_op.ty);
@@ -5177,8 +5179,6 @@ pub const FuncGen = struct {
}
fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_list = try self.resolveInst(ty_op.operand);
const va_list_ty = self.air.getRefType(ty_op.ty);
@@ -5226,8 +5226,6 @@ pub const FuncGen = struct {
}
fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const va_list_ty = self.air.typeOfIndex(inst);
const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
@@ -5254,7 +5252,6 @@ pub const FuncGen = struct {
}
fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -5266,7 +5263,6 @@ pub const FuncGen = struct {
}
fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5281,8 +5277,6 @@ pub const FuncGen = struct {
}
fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const llvm_fn = try self.getCmpLtErrorsLenFunction();
@@ -5650,9 +5644,6 @@ pub const FuncGen = struct {
}
fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const array_ty = operand_ty.childType();
@@ -5674,9 +5665,6 @@ pub const FuncGen = struct {
}
fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -5733,9 +5721,6 @@ pub const FuncGen = struct {
}
fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
self.builder.setFastMath(want_fast_math);
const target = self.dg.module.getTarget();
@@ -5792,16 +5777,12 @@ pub const FuncGen = struct {
}
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
return self.builder.buildExtractValue(operand, index, "");
}
fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const slice_ptr = try self.resolveInst(ty_op.operand);
const slice_ptr_ty = self.air.typeOf(ty_op.operand);
@@ -5814,8 +5795,6 @@ pub const FuncGen = struct {
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
- if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
-
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
const elem_ty = slice_ty.childType();
@@ -5835,7 +5814,6 @@ pub const FuncGen = struct {
}
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = self.air.typeOf(bin_op.lhs);
@@ -5850,7 +5828,6 @@ pub const FuncGen = struct {
fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
const inst = body_tail[0];
- if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const array_ty = self.air.typeOf(bin_op.lhs);
@@ -5881,8 +5858,6 @@ pub const FuncGen = struct {
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
- if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
-
const elem_ty = ptr_ty.childType();
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
const base_ptr = try self.resolveInst(bin_op.lhs);
@@ -5908,8 +5883,6 @@ pub const FuncGen = struct {
}
fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.air.typeOf(bin_op.lhs);
@@ -5934,9 +5907,6 @@ pub const FuncGen = struct {
}
fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ptr = try self.resolveInst(struct_field.struct_operand);
@@ -5949,8 +5919,6 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
field_index: u32,
) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const struct_ptr = try self.resolveInst(ty_op.operand);
const struct_ptr_ty = self.air.typeOf(ty_op.operand);
@@ -5959,8 +5927,6 @@ pub const FuncGen = struct {
fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
const inst = body_tail[0];
- if (self.liveness.isUnused(inst)) return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ty = self.air.typeOf(struct_field.struct_operand);
@@ -6060,8 +6026,6 @@ pub const FuncGen = struct {
}
fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@@ -6083,9 +6047,6 @@ pub const FuncGen = struct {
}
fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -6263,8 +6224,6 @@ pub const FuncGen = struct {
const clobbers_len = @truncate(u31, extra.data.flags);
var extra_i: usize = extra.end;
- if (!is_volatile and self.liveness.isUnused(inst)) return null;
-
const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
@@ -6610,8 +6569,6 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
pred: llvm.IntPredicate,
) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.air.typeOf(un_op);
@@ -6659,8 +6616,6 @@ pub const FuncGen = struct {
op: llvm.IntPredicate,
operand_is_ptr: bool,
) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.air.typeOf(un_op);
@@ -6701,8 +6656,6 @@ pub const FuncGen = struct {
}
fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand).childType();
@@ -6756,8 +6709,6 @@ pub const FuncGen = struct {
fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
const inst = body_tail[0];
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand);
@@ -6780,8 +6731,6 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
) !?*llvm.Value {
const inst = body_tail[0];
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@@ -6817,9 +6766,6 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
operand_is_ptr: bool,
) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@@ -6893,8 +6839,6 @@ pub const FuncGen = struct {
}
fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const target = self.dg.module.getTarget();
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -6911,8 +6855,6 @@ pub const FuncGen = struct {
}
fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
const non_null_bit = self.context.intType(8).constInt(1, .False);
@@ -6943,8 +6885,6 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const err_un_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
@@ -6978,8 +6918,6 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const err_un_ty = self.air.typeOfIndex(inst);
const payload_ty = err_un_ty.errorUnionPayload();
@@ -7015,8 +6953,6 @@ pub const FuncGen = struct {
}
fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const index = pl_op.payload;
const llvm_u32 = self.context.intType(32);
@@ -7061,8 +6997,6 @@ pub const FuncGen = struct {
}
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7074,8 +7008,6 @@ pub const FuncGen = struct {
}
fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7087,8 +7019,6 @@ pub const FuncGen = struct {
}
fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
@@ -7103,7 +7033,6 @@ pub const FuncGen = struct {
}
fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7118,7 +7047,6 @@ pub const FuncGen = struct {
}
fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7129,8 +7057,6 @@ pub const FuncGen = struct {
}
fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7144,7 +7070,6 @@ pub const FuncGen = struct {
}
fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7159,7 +7084,6 @@ pub const FuncGen = struct {
}
fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7170,8 +7094,6 @@ pub const FuncGen = struct {
}
fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7184,7 +7106,6 @@ pub const FuncGen = struct {
}
fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7199,7 +7120,6 @@ pub const FuncGen = struct {
}
fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7210,8 +7130,6 @@ pub const FuncGen = struct {
}
fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7224,7 +7142,6 @@ pub const FuncGen = struct {
}
fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7236,7 +7153,6 @@ pub const FuncGen = struct {
}
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7254,7 +7170,6 @@ pub const FuncGen = struct {
}
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7287,7 +7202,6 @@ pub const FuncGen = struct {
}
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7302,7 +7216,6 @@ pub const FuncGen = struct {
}
fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7317,7 +7230,6 @@ pub const FuncGen = struct {
}
fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7347,8 +7259,6 @@ pub const FuncGen = struct {
}
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const base_ptr = try self.resolveInst(bin_op.lhs);
@@ -7368,8 +7278,6 @@ pub const FuncGen = struct {
}
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const base_ptr = try self.resolveInst(bin_op.lhs);
@@ -7395,9 +7303,6 @@ pub const FuncGen = struct {
signed_intrinsic: []const u8,
unsigned_intrinsic: []const u8,
) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -7686,8 +7591,6 @@ pub const FuncGen = struct {
}
fn airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
@@ -7700,9 +7603,6 @@ pub const FuncGen = struct {
}
fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -7759,8 +7659,6 @@ pub const FuncGen = struct {
}
fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7768,8 +7666,6 @@ pub const FuncGen = struct {
}
fn airOr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7777,8 +7673,6 @@ pub const FuncGen = struct {
}
fn airXor(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7786,8 +7680,6 @@ pub const FuncGen = struct {
}
fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7809,8 +7701,6 @@ pub const FuncGen = struct {
}
fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7831,8 +7721,6 @@ pub const FuncGen = struct {
}
fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7876,8 +7764,6 @@ pub const FuncGen = struct {
}
fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7912,9 +7798,6 @@ pub const FuncGen = struct {
}
fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const target = self.dg.module.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dest_ty = self.air.typeOfIndex(inst);
@@ -7937,8 +7820,6 @@ pub const FuncGen = struct {
}
fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
@@ -7946,9 +7827,6 @@ pub const FuncGen = struct {
}
fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@@ -7978,9 +7856,6 @@ pub const FuncGen = struct {
}
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@@ -8010,9 +7885,6 @@ pub const FuncGen = struct {
}
fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
@@ -8020,8 +7892,6 @@ pub const FuncGen = struct {
}
fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const inst_ty = self.air.typeOfIndex(inst);
@@ -8137,9 +8007,6 @@ pub const FuncGen = struct {
}
fn airBoolToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
-
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
return operand;
@@ -8189,7 +8056,6 @@ pub const FuncGen = struct {
}
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const pointee_type = ptr_ty.childType();
if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
@@ -8201,7 +8067,6 @@ pub const FuncGen = struct {
}
fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const ret_ty = ptr_ty.childType();
if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
@@ -8289,8 +8154,6 @@ pub const FuncGen = struct {
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
- if (ptr_info.@"volatile") break :elide;
- if (fg.liveness.isUnused(inst)) return null;
if (!isByRef(ptr_info.pointee_type)) break :elide;
if (!canElideLoad(fg, body_tail)) break :elide;
return ptr;
@@ -8314,8 +8177,7 @@ pub const FuncGen = struct {
}
fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
+ _ = inst;
const llvm_usize = try self.dg.lowerType(Type.usize);
const target = self.dg.module.getTarget();
if (!target_util.supportsReturnAddress(target)) {
@@ -8331,8 +8193,7 @@ pub const FuncGen = struct {
}
fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
+ _ = inst;
const llvm_i32 = self.context.intType(32);
const llvm_fn_name = "llvm.frameaddress.p0";
const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
@@ -8462,8 +8323,6 @@ pub const FuncGen = struct {
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.air.typeOf(atomic_load.ptr);
const ptr_info = ptr_ty.ptrInfo().data;
- if (!ptr_info.@"volatile" and self.liveness.isUnused(inst))
- return null;
const elem_ty = ptr_info.pointee_type;
if (!elem_ty.hasRuntimeBitsIgnoreComptime())
return null;
@@ -8577,8 +8436,6 @@ pub const FuncGen = struct {
}
fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const un_ty = self.air.typeOf(ty_op.operand);
const target = self.dg.module.getTarget();
@@ -8603,8 +8460,6 @@ pub const FuncGen = struct {
}
fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.air.typeOf(un_op);
@@ -8613,7 +8468,6 @@ pub const FuncGen = struct {
}
fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -8624,8 +8478,6 @@ pub const FuncGen = struct {
}
fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
@@ -8652,8 +8504,6 @@ pub const FuncGen = struct {
}
fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
@@ -8679,8 +8529,6 @@ pub const FuncGen = struct {
}
fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const target = self.dg.module.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
@@ -8734,8 +8582,6 @@ pub const FuncGen = struct {
}
fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const error_set_ty = self.air.getRefType(ty_op.ty);
@@ -8781,8 +8627,6 @@ pub const FuncGen = struct {
}
fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.air.typeOf(un_op);
@@ -8862,8 +8706,6 @@ pub const FuncGen = struct {
}
fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.air.typeOf(un_op);
@@ -8995,8 +8837,6 @@ pub const FuncGen = struct {
}
fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const slice_ty = self.air.typeOfIndex(inst);
@@ -9011,8 +8851,6 @@ pub const FuncGen = struct {
}
fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const scalar = try self.resolveInst(ty_op.operand);
const vector_ty = self.air.typeOfIndex(inst);
@@ -9021,8 +8859,6 @@ pub const FuncGen = struct {
}
fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const pred = try self.resolveInst(pl_op.operand);
@@ -9033,8 +8869,6 @@ pub const FuncGen = struct {
}
fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolveInst(extra.a);
@@ -9134,7 +8968,6 @@ pub const FuncGen = struct {
}
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const target = self.dg.module.getTarget();
@@ -9221,8 +9054,6 @@ pub const FuncGen = struct {
}
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
@@ -9360,8 +9191,6 @@ pub const FuncGen = struct {
}
fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const union_ty = self.air.typeOfIndex(inst);
@@ -9566,8 +9395,6 @@ pub const FuncGen = struct {
}
fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const inst_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
@@ -9592,8 +9419,6 @@ pub const FuncGen = struct {
}
fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const target = self.dg.module.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
@@ -9603,8 +9428,6 @@ pub const FuncGen = struct {
}
fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const target = self.dg.module.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
@@ -9634,8 +9457,6 @@ pub const FuncGen = struct {
}
fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const target = self.dg.module.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
@@ -9756,8 +9577,6 @@ pub const FuncGen = struct {
struct_ptr_ty: Type,
field_index: u32,
) !?*llvm.Value {
- if (self.liveness.isUnused(inst)) return null;
-
const target = self.dg.object.target;
const struct_ty = struct_ptr_ty.childType();
switch (struct_ty.zigTypeTag()) {
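
Note that several of the deleted llvm.zig guards were volatile-aware: slice and pointer element loads, atomic loads, and inline assembly bailed out only when the access was not volatile. The centralized skip is only sound if Air.mustLower preserves that distinction. A hedged sketch of what such arms plausibly look like (illustrative reconstruction; the authoritative switch lives in src/Air.zig and is not reproduced here):

fn mustLowerSketch(air: Air, inst: Air.Inst.Index) bool {
    const data = air.instructions.items(.data)[inst];
    return switch (air.instructions.items(.tag)[inst]) {
        // An unused result may be skipped only when the access is not volatile.
        .load => air.typeOf(data.ty_op.operand).isVolatilePtr(),
        .atomic_load => air.typeOf(data.atomic_load.ptr).isVolatilePtr(),
        .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs).isVolatilePtr(),
        else => false, // side-effecting tags would return true unconditionally
    };
}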
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 58da5539ac..87b72c6726 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -1507,6 +1507,11 @@ pub const DeclGen = struct {
}
fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
+ // TODO: remove now-redundant isUnused calls from AIR handler functions
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ return;
+ }
+
const air_tags = self.air.instructions.items(.tag);
const maybe_result_id: ?IdRef = switch (air_tags[inst]) {
// zig fmt: off
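
The SPIR-V backend gains the identical top-of-dispatch skip. Once genInst filters unused instructions, handler-local guards of the following shape become dead code, which is what the TODO refers to (hypothetical handler, shown only to illustrate the pattern):

fn airExample(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
    // Now redundant: genInst already skipped unused instructions
    // that do not need to be lowered.
    if (self.liveness.isUnused(inst)) return null;
    // ... emit SPIR-V ...
    return null;
}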
diff --git a/src/print_air.zig b/src/print_air.zig
index 2970d43925..2053320653 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -8,16 +8,16 @@ const Type = @import("type.zig").Type;
const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");
-pub fn write(stream: anytype, module: *Module, air: Air, liveness: Liveness) void {
+pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) void {
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
(@sizeOf(Air.Inst.Tag) + 8);
const extra_bytes = air.extra.len * @sizeOf(u32);
const values_bytes = air.values.len * @sizeOf(Value);
- const tomb_bytes = liveness.tomb_bits.len * @sizeOf(usize);
- const liveness_extra_bytes = liveness.extra.len * @sizeOf(u32);
- const liveness_special_bytes = liveness.special.count() * 8;
+ const tomb_bytes = if (liveness) |l| l.tomb_bits.len * @sizeOf(usize) else 0;
+ const liveness_extra_bytes = if (liveness) |l| l.extra.len * @sizeOf(u32) else 0;
+ const liveness_special_bytes = if (liveness) |l| l.special.count() * 8 else 0;
const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes +
values_bytes + @sizeOf(Liveness) + liveness_extra_bytes +
liveness_special_bytes + tomb_bytes;
@@ -38,8 +38,8 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: Liveness) voi
air.extra.len, fmtIntSizeBin(extra_bytes),
air.values.len, fmtIntSizeBin(values_bytes),
fmtIntSizeBin(tomb_bytes),
- liveness.extra.len, fmtIntSizeBin(liveness_extra_bytes),
- liveness.special.count(), fmtIntSizeBin(liveness_special_bytes),
+ if (liveness) |l| l.extra.len else 0, fmtIntSizeBin(liveness_extra_bytes),
+ if (liveness) |l| l.special.count() else 0, fmtIntSizeBin(liveness_special_bytes),
}) catch return;
// zig fmt: on
@@ -61,7 +61,7 @@ pub fn writeInst(
inst: Air.Inst.Index,
module: *Module,
air: Air,
- liveness: Liveness,
+ liveness: ?Liveness,
) void {
var writer: Writer = .{
.module = module,
@@ -74,11 +74,11 @@ pub fn writeInst(
writer.writeInst(stream, inst) catch return;
}
-pub fn dump(module: *Module, air: Air, liveness: Liveness) void {
+pub fn dump(module: *Module, air: Air, liveness: ?Liveness) void {
write(std.io.getStdErr().writer(), module, air, liveness);
}
-pub fn dumpInst(inst: Air.Inst.Index, module: *Module, air: Air, liveness: Liveness) void {
+pub fn dumpInst(inst: Air.Inst.Index, module: *Module, air: Air, liveness: ?Liveness) void {
writeInst(std.io.getStdErr().writer(), inst, module, air, liveness);
}
@@ -86,7 +86,7 @@ const Writer = struct {
module: *Module,
gpa: Allocator,
air: Air,
- liveness: Liveness,
+ liveness: ?Liveness,
indent: usize,
skip_body: bool,
@@ -109,7 +109,7 @@ const Writer = struct {
try s.writeByteNTimes(' ', w.indent);
try s.print("%{d}{c}= {s}(", .{
inst,
- @as(u8, if (w.liveness.isUnused(inst)) '!' else ' '),
+ @as(u8, if (if (w.liveness) |liveness| liveness.isUnused(inst) else false) '!' else ' '),
@tagName(tag),
});
switch (tag) {
@@ -389,6 +389,10 @@ const Writer = struct {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const extra = w.air.extraData(Air.Block, ty_pl.payload);
const body = w.air.extra[extra.end..][0..extra.data.body_len];
+ const liveness_block = if (w.liveness) |liveness|
+ liveness.getBlock(inst)
+ else
+ Liveness.BlockSlices{ .deaths = &.{} };
try w.writeType(s, w.air.getRefType(ty_pl.ty));
if (w.skip_body) return s.writeAll(", ...");
@@ -399,13 +403,16 @@ const Writer = struct {
w.indent = old_indent;
try s.writeByteNTimes(' ', w.indent);
try s.writeAll("}");
+
+ for (liveness_block.deaths) |operand| {
+ try s.print(" %{d}!", .{operand});
+ }
}
fn writeLoop(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const extra = w.air.extraData(Air.Block, ty_pl.payload);
const body = w.air.extra[extra.end..][0..extra.data.body_len];
- const liveness_loop = w.liveness.getLoop(inst);
try w.writeType(s, w.air.getRefType(ty_pl.ty));
if (w.skip_body) return s.writeAll(", ...");
@@ -413,14 +420,6 @@ const Writer = struct {
const old_indent = w.indent;
w.indent += 2;
try w.writeBody(s, body);
- if (liveness_loop.deaths.len != 0) {
- try s.writeByteNTimes(' ', w.indent);
- for (liveness_loop.deaths, 0..) |operand, i| {
- if (i != 0) try s.writeAll(" ");
- try s.print("%{d}!", .{operand});
- }
- try s.writeAll("\n");
- }
w.indent = old_indent;
try s.writeByteNTimes(' ', w.indent);
try s.writeAll("}");
@@ -746,22 +745,44 @@ const Writer = struct {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Try, pl_op.payload);
const body = w.air.extra[extra.end..][0..extra.data.body_len];
+ const liveness_condbr = if (w.liveness) |liveness|
+ liveness.getCondBr(inst)
+ else
+ Liveness.CondBrSlices{ .then_deaths = &.{}, .else_deaths = &.{} };
try w.writeOperand(s, inst, 0, pl_op.operand);
if (w.skip_body) return s.writeAll(", ...");
try s.writeAll(", {\n");
const old_indent = w.indent;
w.indent += 2;
+
+ if (liveness_condbr.else_deaths.len != 0) {
+ try s.writeByteNTimes(' ', w.indent);
+ for (liveness_condbr.else_deaths, 0..) |operand, i| {
+ if (i != 0) try s.writeAll(" ");
+ try s.print("%{d}!", .{operand});
+ }
+ try s.writeAll("\n");
+ }
try w.writeBody(s, body);
+
w.indent = old_indent;
try s.writeByteNTimes(' ', w.indent);
try s.writeAll("}");
+
+ for (liveness_condbr.then_deaths) |operand| {
+ try s.print(" %{d}!", .{operand});
+ }
}
fn writeTryPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const extra = w.air.extraData(Air.TryPtr, ty_pl.payload);
const body = w.air.extra[extra.end..][0..extra.data.body_len];
+ const liveness_condbr = if (w.liveness) |liveness|
+ liveness.getCondBr(inst)
+ else
+ Liveness.CondBrSlices{ .then_deaths = &.{}, .else_deaths = &.{} };
try w.writeOperand(s, inst, 0, extra.data.ptr);
@@ -771,10 +792,24 @@ const Writer = struct {
try s.writeAll(", {\n");
const old_indent = w.indent;
w.indent += 2;
+
+ if (liveness_condbr.else_deaths.len != 0) {
+ try s.writeByteNTimes(' ', w.indent);
+ for (liveness_condbr.else_deaths, 0..) |operand, i| {
+ if (i != 0) try s.writeAll(" ");
+ try s.print("%{d}!", .{operand});
+ }
+ try s.writeAll("\n");
+ }
try w.writeBody(s, body);
+
w.indent = old_indent;
try s.writeByteNTimes(' ', w.indent);
try s.writeAll("}");
+
+ for (liveness_condbr.then_deaths) |operand| {
+ try s.print(" %{d}!", .{operand});
+ }
}
fn writeCondBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
@@ -782,7 +817,10 @@ const Writer = struct {
const extra = w.air.extraData(Air.CondBr, pl_op.payload);
const then_body = w.air.extra[extra.end..][0..extra.data.then_body_len];
const else_body = w.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- const liveness_condbr = w.liveness.getCondBr(inst);
+ const liveness_condbr = if (w.liveness) |liveness|
+ liveness.getCondBr(inst)
+ else
+ Liveness.CondBrSlices{ .then_deaths = &.{}, .else_deaths = &.{} };
try w.writeOperand(s, inst, 0, pl_op.operand);
if (w.skip_body) return s.writeAll(", ...");
@@ -822,8 +860,15 @@ const Writer = struct {
fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const switch_br = w.air.extraData(Air.SwitchBr, pl_op.payload);
- const liveness = w.liveness.getSwitchBr(w.gpa, inst, switch_br.data.cases_len + 1) catch
- @panic("out of memory");
+ const liveness = if (w.liveness) |liveness|
+ liveness.getSwitchBr(w.gpa, inst, switch_br.data.cases_len + 1) catch
+ @panic("out of memory")
+ else blk: {
+ const slice = w.gpa.alloc([]const Air.Inst.Index, switch_br.data.cases_len + 1) catch
+ @panic("out of memory");
+ std.mem.set([]const Air.Inst.Index, slice, &.{});
+ break :blk Liveness.SwitchBrTable{ .deaths = slice };
+ };
defer w.gpa.free(liveness.deaths);
var extra_index: usize = switch_br.end;
var case_i: u32 = 0;
@@ -913,13 +958,13 @@ const Writer = struct {
operand: Air.Inst.Ref,
) @TypeOf(s).Error!void {
const small_tomb_bits = Liveness.bpi - 1;
- const dies = if (op_index < small_tomb_bits)
- w.liveness.operandDies(inst, @intCast(Liveness.OperandInt, op_index))
- else blk: {
- var extra_index = w.liveness.special.get(inst).?;
+ const dies = if (w.liveness) |liveness| blk: {
+ if (op_index < small_tomb_bits)
+ break :blk liveness.operandDies(inst, @intCast(Liveness.OperandInt, op_index));
+ var extra_index = liveness.special.get(inst).?;
var tomb_op_index: usize = small_tomb_bits;
while (true) {
- const bits = w.liveness.extra[extra_index];
+ const bits = liveness.extra[extra_index];
if (op_index < tomb_op_index + 31) {
break :blk @truncate(u1, bits >> @intCast(u5, op_index - tomb_op_index)) != 0;
}
@@ -927,7 +972,7 @@ const Writer = struct {
extra_index += 1;
tomb_op_index += 31;
}
- };
+ } else false;
return w.writeInstRef(s, operand, dies);
}
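
With liveness made optional throughout print_air.zig, AIR can now be dumped before liveness analysis has run (or after its buffers have been freed); passing null simply drops the '!' unused markers and all death annotations. A hypothetical debugging call site:

const print_air = @import("print_air.zig");

// No liveness results yet: the liveness sizes report 0 and no deaths print.
print_air.dump(module, air, null);

// With analysis results available, unused instructions are marked with '!'.
print_air.dump(module, air, liveness);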
diff --git a/src/register_manager.zig b/src/register_manager.zig
index fe53ba3b95..841545bf09 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -95,6 +95,10 @@ pub fn RegisterManager(
return indexOfReg(tracked_registers, reg);
}
+ pub fn regAtTrackedIndex(index: RegisterBitSet.ShiftInt) Register {
+ return tracked_registers[index];
+ }
+
/// Returns true when this register is not tracked
pub fn isRegFree(self: Self, reg: Register) bool {
const index = indexOfRegIntoTracked(reg) orelse return true;
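
regAtTrackedIndex is the inverse of the existing indexOfRegIntoTracked: it maps a position in the tracked-register bit set back to the concrete Register. A hedged usage sketch (the helper below is hypothetical; it assumes RegisterBitSet is a std bit set whose iterator yields usize indices):

// Log every register currently marked free in the bit set.
fn logFreeRegisters(free_registers: RegisterBitSet) void {
    var it = free_registers.iterator(.{});
    while (it.next()) |index| {
        const reg = regAtTrackedIndex(@intCast(RegisterBitSet.ShiftInt, index));
        std.log.debug("free: {}", .{reg});
    }
}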