author    Andrew Kelley <andrew@ziglang.org>  2021-07-10 16:24:35 -0700
committer Andrew Kelley <andrew@ziglang.org>  2021-07-20 12:18:14 -0700
commit    3c3abaf3907e344305620fb4565e7c1acb0a9c88 (patch)
tree      d418d08645bf500bb30e3b3b388dbfa4a21e4aec /src
parent    5d6f7b44c19b064a543b0c1eecb6ef5c671b612e (diff)
download  zig-3c3abaf3907e344305620fb4565e7c1acb0a9c88.tar.gz
          zig-3c3abaf3907e344305620fb4565e7c1acb0a9c88.zip
stage2: update liveness analysis to new AIR memory layout
The encoding is quite compact: each AIR instruction takes up only 4 bits, plus a sparse table for special instructions such as conditional branches, switch branches, and function calls with more than 2 arguments.
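As an illustration of that layout (a sketch, not code from this commit): the 4-bit tomb entries are packed into `usize` words, so reading one back out could look like the following, assuming it sits inside src/Liveness.zig next to the `bpi`, `Bpi`, and `tomb_bits` definitions introduced below:

    /// Hypothetical helper: extract the 4 tomb bits recorded for `inst`.
    /// The MSB means the instruction itself is unreferenced; each lower bit
    /// means the corresponding operand (up to 3) dies at this instruction.
    pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi {
        const usize_index = (inst * bpi) / @bitSizeOf(usize);
        const shift = @intCast(std.math.Log2Int(usize),
            (inst % (@bitSizeOf(usize) / bpi)) * bpi);
        return @truncate(Bpi, l.tomb_bits[usize_index] >> shift);
    }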
Diffstat (limited to 'src')
-rw-r--r--  src/Air.zig       122
-rw-r--r--  src/Liveness.zig  457
-rw-r--r--  src/codegen.zig    13
-rw-r--r--  src/liveness.zig  254
4 files changed, 541 insertions(+), 305 deletions(-)
diff --git a/src/Air.zig b/src/Air.zig
index 97a5824abc..c57232fba0 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -10,10 +10,18 @@ const Air = @This();
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
+/// The first few indexes are reserved. See `ExtraIndex` for the values.
extra: []u32,
values: []Value,
variables: []*Module.Var,
+pub const ExtraIndex = enum(u32) {
+ /// Payload index of the main `Block` in the `extra` array.
+ main_block,
+
+ _,
+};
+
pub const Inst = struct {
tag: Tag,
data: Data,
@@ -231,11 +239,25 @@ pub const Inst = struct {
.neq => .cmp_neq,
};
}
+
+ pub fn toCmpOp(tag: Tag) ?std.math.CompareOperator {
+ return switch (tag) {
+ .cmp_lt => .lt,
+ .cmp_lte => .lte,
+ .cmp_eq => .eq,
+ .cmp_gte => .gte,
+ .cmp_gt => .gt,
+ .cmp_neq => .neq,
+ else => null,
+ };
+ }
};
/// The position of an AIR instruction within the `Air` instructions array.
pub const Index = u32;
+ pub const Ref = @import("Zir.zig").Inst.Ref;
+
/// All instructions have an 8-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
/// how to interpret the data within.
@@ -281,55 +303,69 @@ pub const Inst = struct {
}
}
};
+};
- pub fn cmpOperator(base: *Inst) ?std.math.CompareOperator {
- return switch (base.tag) {
- .cmp_lt => .lt,
- .cmp_lte => .lte,
- .cmp_eq => .eq,
- .cmp_gte => .gte,
- .cmp_gt => .gt,
- .cmp_neq => .neq,
- else => null,
- };
- }
+/// Trailing is a list of instruction indexes for every `body_len`.
+pub const Block = struct {
+ body_len: u32,
+};
- /// Trailing is a list of instruction indexes for every `body_len`.
- pub const Block = struct {
- body_len: u32,
- };
+/// Trailing is a list of `Ref` for every `args_len`.
+pub const Call = struct {
+ args_len: u32,
+};
- /// Trailing is a list of `Ref` for every `args_len`.
- pub const Call = struct {
- args_len: u32,
- };
+/// This data is stored inside extra, with two sets of trailing `Ref`:
+/// * 0. the then body, according to `then_body_len`.
+/// * 1. the else body, according to `else_body_len`.
+pub const CondBr = struct {
+ then_body_len: u32,
+ else_body_len: u32,
+};
- /// This data is stored inside extra, with two sets of trailing `Ref`:
- /// * 0. the then body, according to `then_body_len`.
- /// * 1. the else body, according to `else_body_len`.
- pub const CondBr = struct {
- condition: Ref,
- then_body_len: u32,
- else_body_len: u32,
- };
+/// Trailing:
+/// * 0. `Case` for each `cases_len`
+/// * 1. the else body, according to `else_body_len`.
+pub const SwitchBr = struct {
+ cases_len: u32,
+ else_body_len: u32,
/// Trailing:
- /// * 0. `Case` for each `cases_len`
- /// * 1. the else body, according to `else_body_len`.
- pub const SwitchBr = struct {
- cases_len: u32,
- else_body_len: u32,
-
- /// Trailing:
- /// * instruction index for each `body_len`.
- pub const Case = struct {
- item: Ref,
- body_len: u32,
- };
+ /// * instruction index for each `body_len`.
+ pub const Case = struct {
+ item: Ref,
+ body_len: u32,
};
+};
- pub const StructField = struct {
- struct_ptr: Ref,
- field_index: u32,
- };
+pub const StructField = struct {
+ struct_ptr: Ref,
+ field_index: u32,
};
+
+pub fn getMainBody(air: Air) []const Air.Inst.Index {
+ const body_index = air.extra[@enumToInt(ExtraIndex.main_block)];
+ const body_len = air.extra[body_index];
+    // The body trails just after the `body_len` field of the `Block` payload.
+    return air.extra[body_index + 1 ..][0..body_len];
+}
+
+/// Returns the requested data, as well as the new index which is at the start of the
+/// trailers for the object.
+pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } {
+ const fields = std.meta.fields(T);
+ var i: usize = index;
+ var result: T = undefined;
+ inline for (fields) |field| {
+ @field(result, field.name) = switch (field.field_type) {
+ u32 => air.extra[i],
+ Inst.Ref => @intToEnum(Inst.Ref, air.extra[i]),
+ i32 => @bitCast(i32, air.extra[i]),
+ else => @compileError("bad field type"),
+ };
+ i += 1;
+ }
+ return .{
+ .data = result,
+ .end = i,
+ };
+}
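+
+// Usage sketch (illustrative only, not part of this change): decoding a
+// `block` instruction's `Block` payload together with its trailing body,
+// the same pattern Liveness.zig applies below:
+//
+//     const ty_pl = air.instructions.items(.data)[inst].ty_pl;
+//     const extra = air.extraData(Block, ty_pl.payload);
+//     const body = air.extra[extra.end..][0..extra.data.body_len];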
diff --git a/src/Liveness.zig b/src/Liveness.zig
new file mode 100644
index 0000000000..828614dcbb
--- /dev/null
+++ b/src/Liveness.zig
@@ -0,0 +1,457 @@
+//! For each AIR instruction, we want to know:
+//! * Is the instruction unreferenced (e.g. dies immediately)?
+//! * For each of its operands, does the operand die with this instruction (e.g. is
+//! this the last reference to it)?
+//! Some instructions are special, such as:
+//! * Conditional Branches
+//! * Switch Branches
+const Liveness = @This();
+const std = @import("std");
+const Air = @import("Air.zig");
+const trace = @import("tracy.zig").trace;
+const log = std.log.scoped(.liveness);
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+/// This array is split into sets of 4 bits per AIR instruction.
+/// The MSB (0bX000) is whether the instruction is unreferenced.
+/// The LSB (0b000X) is the first operand, and so on, up to 3 operands. A set bit means the
+/// operand dies after this instruction.
+/// Instructions which need more data to track liveness have special handling via the
+/// `special` table.
+tomb_bits: []const usize,
+/// Sparse table of specially handled instructions. The value is an index into the `extra`
+/// array. The meaning of the data depends on the AIR tag.
+special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
+/// Auxiliary data. The way this data is interpreted is determined contextually.
+extra: []const u32,
+
+/// Trailing is the set of instructions whose lifetimes end at the start of the then branch,
+/// followed by the set of instructions whose lifetimes end at the start of the else branch.
+pub const CondBr = struct {
+ then_death_count: u32,
+ else_death_count: u32,
+};
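+
+// How a consumer might read these sets back (a sketch; this commit adds no
+// accessor for it), given the computed liveness `l` and a `cond_br`
+// instruction `inst` that has an entry in `special`:
+//
+//     const extra_index = l.special.get(inst).?;
+//     const then_death_count = l.extra[extra_index];
+//     const else_death_count = l.extra[extra_index + 1];
+//     const then_deaths = l.extra[extra_index + 2 ..][0..then_death_count];
+//     const else_deaths = l.extra[extra_index + 2 + then_death_count ..][0..else_death_count];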
+
+/// Trailing is:
+/// * For each case in the same order as in the AIR:
+/// - case_death_count: u32
+/// - Air.Inst.Index for each `case_death_count`: set of instructions whose lifetimes
+/// end at the start of this case.
+/// * Air.Inst.Index for each `else_death_count`: set of instructions whose lifetimes
+/// end at the start of the else case.
+pub const SwitchBr = struct {
+ else_death_count: u32,
+};
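+
+// Walking the per-case death sets could look like this (illustrative sketch;
+// `cases_len` comes from the instruction's `Air.SwitchBr` payload):
+//
+//     var index: usize = l.special.get(inst).?;
+//     const else_death_count = l.extra[index];
+//     index += 1;
+//     var case_i: u32 = 0;
+//     while (case_i < cases_len) : (case_i += 1) {
+//         const case_death_count = l.extra[index];
+//         const case_deaths = l.extra[index + 1 ..][0..case_death_count];
+//         index += 1 + case_death_count;
+//     }
+//     const else_deaths = l.extra[index..][0..else_death_count];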
+
+pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ var a: Analysis = .{
+ .gpa = gpa,
+ .air = &air,
+ .table = .{},
+ .tomb_bits = try gpa.alloc(
+ usize,
+ (air.instructions.len * bpi + @bitSizeOf(usize) - 1) / @bitSizeOf(usize),
+ ),
+ .extra = .{},
+ .special = .{},
+ };
+ errdefer gpa.free(a.tomb_bits);
+ errdefer a.special.deinit(gpa);
+ defer a.extra.deinit(gpa);
+ defer a.table.deinit(gpa);
+
+ const main_body = air.getMainBody();
+    try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len));
+ try analyzeWithContext(&a, null, main_body);
+ return Liveness{
+ .tomb_bits = a.tomb_bits,
+ .special = a.special,
+ .extra = a.extra.toOwnedSlice(gpa),
+ };
+}
+
+pub fn deinit(l: *Liveness, gpa: *Allocator) void {
+ gpa.free(l.tomb_bits);
+ gpa.free(l.extra);
+ l.special.deinit(gpa);
+}
+
+/// How many tomb bits per AIR instruction.
+const bpi = 4;
+const Bpi = std.meta.Int(.unsigned, bpi);
+
+/// In-progress data; on successful analysis converted into `Liveness`.
+const Analysis = struct {
+ gpa: *Allocator,
+ air: *const Air,
+ table: std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+    tomb_bits: []usize,
+    special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
+    extra: std.ArrayListUnmanaged(u32),
+
+ fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void {
+ const usize_index = (inst * bpi) / @bitSizeOf(usize);
+        a.tomb_bits[usize_index] |= @as(usize, tomb_bits) <<
+            @intCast(std.math.Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi);
+ }
+
+ fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 {
+ const fields = std.meta.fields(@TypeOf(extra));
+ try a.extra.ensureUnusedCapacity(a.gpa, fields.len);
+ return addExtraAssumeCapacity(a, extra);
+ }
+
+ fn addExtraAssumeCapacity(a: *Analysis, extra: anytype) u32 {
+ const fields = std.meta.fields(@TypeOf(extra));
+ const result = @intCast(u32, a.extra.items.len);
+ inline for (fields) |field| {
+ a.extra.appendAssumeCapacity(switch (field.field_type) {
+ u32 => @field(extra, field.name),
+ else => @compileError("bad field type"),
+ });
+ }
+ return result;
+ }
+};
+
+fn analyzeWithContext(
+ a: *Analysis,
+ new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+ body: []const Air.Inst.Index,
+) Allocator.Error!void {
+ var i: usize = body.len;
+
+ if (new_set) |ns| {
+ // We are only interested in doing this for instructions which are born
+ // before a conditional branch, so after obtaining the new set for
+ // each branch we prune the instructions which were born within.
+ while (i != 0) {
+ i -= 1;
+ const inst = body[i];
+ _ = ns.remove(inst);
+ try analyzeInst(a, new_set, inst);
+ }
+ } else {
+ while (i != 0) {
+ i -= 1;
+ const inst = body[i];
+ try analyzeInst(a, new_set, inst);
+ }
+ }
+}
+
+fn analyzeInst(
+ a: *Analysis,
+    new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+ inst: Air.Inst.Index,
+) Allocator.Error!void {
+ const gpa = a.gpa;
+ const table = &a.table;
+    const inst_tags = a.air.instructions.items(.tag);
+    const inst_datas = a.air.instructions.items(.data);
+
+ // No tombstone for this instruction means it is never referenced,
+ // and its birth marks its own death. Very metal 🤘
+ const main_tomb = !table.contains(inst);
+
+ switch (inst_tags[inst]) {
+ .add,
+ .addwrap,
+ .sub,
+ .subwrap,
+ .mul,
+ .mulwrap,
+ .div,
+ .bit_and,
+ .bit_or,
+ .xor,
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_eq,
+ .cmp_gte,
+ .cmp_gt,
+ .cmp_neq,
+ .bool_and,
+ .bool_or,
+ .store,
+ => {
+ const o = inst_datas[inst].bin_op;
+ return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none });
+ },
+
+ .alloc,
+ .br,
+ .constant,
+ .breakpoint,
+ .dbg_stmt,
+ .varptr,
+ .unreach,
+ => return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }),
+
+ .not,
+ .bitcast,
+ .load,
+ .ref,
+ .floatcast,
+ .intcast,
+ .optional_payload,
+ .optional_payload_ptr,
+ .wrap_optional,
+ .unwrap_errunion_payload,
+ .unwrap_errunion_err,
+ .unwrap_errunion_payload_ptr,
+ .unwrap_errunion_err_ptr,
+ .wrap_errunion_payload,
+ .wrap_errunion_err,
+ => {
+ const o = inst_datas[inst].ty_op;
+ return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });
+ },
+
+ .is_null,
+ .is_non_null,
+ .is_null_ptr,
+ .is_non_null_ptr,
+ .is_err,
+ .is_non_err,
+ .is_err_ptr,
+ .is_non_err_ptr,
+ .ptrtoint,
+ .ret,
+ => {
+ const operand = inst_datas[inst].un_op;
+ return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
+ },
+
+ .call => {
+ const inst_data = inst_datas[inst].pl_op;
+ const callee = inst_data.operand;
+ const extra = a.air.extraData(Air.Call, inst_data.payload);
+ const args = a.air.extra[extra.end..][0..extra.data.args_len];
+ if (args.len <= bpi - 2) {
+                var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
+                buf[0] = callee;
+                for (args) |arg, arg_i| {
+                    buf[arg_i + 1] = @intToEnum(Air.Inst.Ref, arg);
+                }
+ return trackOperands(a, new_set, inst, main_tomb, buf);
+ }
+ @panic("TODO: liveness analysis for function with many args");
+ },
+ .struct_field_ptr => {
+ const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data;
+ return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none });
+ },
+ .block => {
+ const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+ const body = a.air.extra[extra.end..][0..extra.data.body_len];
+ try analyzeWithContext(a, new_set, body);
+ // We let this continue so that it can possibly mark the block as
+ // unreferenced below.
+ return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none });
+ },
+ .loop => {
+ const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+ const body = a.air.extra[extra.end..][0..extra.data.body_len];
+ try analyzeWithContext(a, new_set, body);
+ return; // Loop has no operands and it is always unreferenced.
+ },
+ .cond_br => {
+ // Each death that occurs inside one branch, but not the other, needs
+ // to be added as a death immediately upon entering the other branch.
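+            // For example, if %4 is referenced for the last time inside the
+            // then branch only, %4 must additionally be recorded as dying on
+            // entry to the else branch, so both paths agree it is dead afterwards.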
+ const inst_data = inst_datas[inst].pl_op;
+ const condition = inst_data.operand;
+ const extra = a.air.extraData(Air.CondBr, inst_data.payload);
+ const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len];
+ const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
+
+            var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
+            defer then_table.deinit(gpa);
+ try analyzeWithContext(a, &then_table, then_body);
+
+ // Reset the table back to its state from before the branch.
+ {
+ var it = then_table.keyIterator();
+ while (it.next()) |key| {
+ assert(table.remove(key.*));
+ }
+ }
+
+            var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
+            defer else_table.deinit(gpa);
+ try analyzeWithContext(a, &else_table, else_body);
+
+ var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa);
+ defer then_entry_deaths.deinit();
+ var else_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa);
+ defer else_entry_deaths.deinit();
+
+ {
+ var it = else_table.keyIterator();
+ while (it.next()) |key| {
+ const else_death = key.*;
+ if (!then_table.contains(else_death)) {
+ try then_entry_deaths.append(else_death);
+ }
+ }
+ }
+ // This loop is the same, except it's for the then branch, and it additionally
+ // has to put its items back into the table to undo the reset.
+ {
+ var it = then_table.keyIterator();
+ while (it.next()) |key| {
+ const then_death = key.*;
+ if (!else_table.contains(then_death)) {
+ try else_entry_deaths.append(then_death);
+ }
+ try table.put(gpa, then_death, {});
+ }
+ }
+ // Now we have to correctly populate new_set.
+ if (new_set) |ns| {
+                try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count()));
+ var it = then_table.keyIterator();
+ while (it.next()) |key| {
+ _ = ns.putAssumeCapacity(key.*, {});
+ }
+ it = else_table.keyIterator();
+ while (it.next()) |key| {
+ _ = ns.putAssumeCapacity(key.*, {});
+ }
+ }
+ const then_death_count = @intCast(u32, then_entry_deaths.items.len);
+ const else_death_count = @intCast(u32, else_entry_deaths.items.len);
+
+            try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(CondBr).len +
+                then_death_count + else_death_count);
+ const extra_index = a.addExtraAssumeCapacity(CondBr{
+ .then_death_count = then_death_count,
+ .else_death_count = else_death_count,
+ });
+ a.extra.appendSliceAssumeCapacity(then_entry_deaths.items);
+ a.extra.appendSliceAssumeCapacity(else_entry_deaths.items);
+            try a.special.put(gpa, inst, extra_index);
+
+ // Continue on with the instruction analysis. The following code will find the condition
+ // instruction, and the deaths flag for the CondBr instruction will indicate whether the
+ // condition's lifetime ends immediately before entering any branch.
+ return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none });
+ },
+ .switch_br => {
+ const inst_data = inst_datas[inst].pl_op;
+ const condition = inst_data.operand;
+ const switch_br = a.air.extraData(Air.SwitchBr, inst_data.payload);
+
+ const Table = std.AutoHashMapUnmanaged(Air.Inst.Index, void);
+ const case_tables = try gpa.alloc(Table, switch_br.data.cases_len + 1); // +1 for else
+ defer gpa.free(case_tables);
+
+ std.mem.set(Table, case_tables, .{});
+ defer for (case_tables) |*ct| ct.deinit(gpa);
+
+ var air_extra_index: usize = switch_br.end;
+ for (case_tables[0..switch_br.data.cases_len]) |*case_table| {
+ const case = a.air.extraData(Air.SwitchBr.Case, air_extra_index);
+ const case_body = a.air.extra[case.end..][0..case.data.body_len];
+ air_extra_index = case.end + case_body.len;
+ try analyzeWithContext(a, case_table, case_body);
+
+ // Reset the table back to its state from before the case.
+ var it = case_table.keyIterator();
+ while (it.next()) |key| {
+ assert(table.remove(key.*));
+ }
+ }
+ { // else
+ const else_table = &case_tables[case_tables.len - 1];
+ const else_body = a.air.extra[air_extra_index..][0..switch_br.data.else_body_len];
+ try analyzeWithContext(a, else_table, else_body);
+
+ // Reset the table back to its state from before the case.
+ var it = else_table.keyIterator();
+ while (it.next()) |key| {
+ assert(table.remove(key.*));
+ }
+ }
+
+ const List = std.ArrayListUnmanaged(Air.Inst.Index);
+ const case_deaths = try gpa.alloc(List, case_tables.len); // includes else
+ defer gpa.free(case_deaths);
+
+ std.mem.set(List, case_deaths, .{});
+ defer for (case_deaths) |*cd| cd.deinit(gpa);
+
+ var total_deaths: u32 = 0;
+ for (case_tables) |*ct, i| {
+ total_deaths += ct.count();
+ var it = ct.keyIterator();
+ while (it.next()) |key| {
+ const case_death = key.*;
+ for (case_tables) |*ct_inner, j| {
+ if (i == j) continue;
+ if (!ct_inner.contains(case_death)) {
+ // instruction is not referenced in this case
+ try case_deaths[j].append(gpa, case_death);
+ }
+ }
+ // undo resetting the table
+ try table.put(gpa, case_death, {});
+ }
+ }
+
+ // Now we have to correctly populate new_set.
+ if (new_set) |ns| {
+ try ns.ensureUnusedCapacity(gpa, total_deaths);
+ for (case_tables) |*ct| {
+ var it = ct.keyIterator();
+ while (it.next()) |key| {
+ _ = ns.putAssumeCapacity(key.*, {});
+ }
+ }
+ }
+
+ const else_death_count = @intCast(u32, case_deaths[case_deaths.len - 1].items.len);
+ const extra_index = try a.addExtra(SwitchBr{
+ .else_death_count = else_death_count,
+ });
+ for (case_deaths[0 .. case_deaths.len - 1]) |*cd| {
+ const case_death_count = @intCast(u32, cd.items.len);
+                try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count);
+ a.extra.appendAssumeCapacity(case_death_count);
+ a.extra.appendSliceAssumeCapacity(cd.items);
+ }
+ a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items);
+            try a.special.put(gpa, inst, extra_index);
+
+ return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none });
+ },
+ }
+}
+
+fn trackOperands(
+ a: *Analysis,
+    new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void),
+ inst: Air.Inst.Index,
+ main_tomb: bool,
+ operands: [bpi - 1]Air.Inst.Ref,
+) Allocator.Error!void {
+ const table = &a.table;
+ const gpa = a.gpa;
+
+ var tomb_bits: Bpi = @boolToInt(main_tomb);
+    var i: usize = operands.len;
+
+ while (i > 0) {
+ i -= 1;
+ tomb_bits <<= 1;
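+        // Ref values below typed_value_map.len denote comptime-known
+        // constants rather than runtime instructions, so they have no
+        // lifetime to track.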
+ const op_int = @enumToInt(operands[i]);
+ if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
+ const operand: Air.Inst.Index = op_int - Air.Inst.Ref.typed_value_map.len;
+ const prev = try table.fetchPut(gpa, operand, {});
+ if (prev == null) {
+ // Death.
+ tomb_bits |= 1;
+            if (new_set) |ns| try ns.putNoClobber(gpa, operand, {});
+ }
+ }
+ a.storeTombBits(inst, tomb_bits);
+}
diff --git a/src/codegen.zig b/src/codegen.zig
index 205bab755a..91b0401291 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -297,7 +297,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// across each runtime branch upon joining.
branch_stack: *std.ArrayList(Branch),
- blocks: std.AutoHashMapUnmanaged(*ir.Inst.Block, BlockData) = .{},
+ // Key is the block instruction
+ blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
register_manager: RegisterManager(Self, Register, &callee_preserved_regs) = .{},
/// Maps offset to what is stored there.
@@ -383,7 +384,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
const Branch = struct {
- inst_table: std.AutoArrayHashMapUnmanaged(*ir.Inst, MCValue) = .{},
+ inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
fn deinit(self: *Branch, gpa: *Allocator) void {
self.inst_table.deinit(gpa);
@@ -392,7 +393,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
const StackAllocation = struct {
- inst: *ir.Inst,
+ inst: Air.Inst.Index,
/// TODO do we need size? should be determined by inst.ty.abiSize()
size: u32,
};
@@ -720,7 +721,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try self.dbgAdvancePCAndLine(self.end_di_line, self.end_di_column);
}
- fn genBody(self: *Self, body: ir.Body) InnerError!void {
+ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
for (body.instructions) |inst| {
try self.ensureProcessDeathCapacity(@popCount(@TypeOf(inst.deaths), inst.deaths));
@@ -2824,10 +2825,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genDbgStmt(self: *Self, inst: *ir.Inst.DbgStmt) !MCValue {
- // TODO when reworking AIR memory layout, rework source locations here as
- // well to be more efficient, as well as support inlined function calls correctly.
- // For now we convert LazySrcLoc to absolute byte offset, to match what the
- // existing codegen code expects.
try self.dbgAdvancePCAndLine(inst.line, inst.column);
assert(inst.base.isUnused());
return MCValue.dead;
diff --git a/src/liveness.zig b/src/liveness.zig
deleted file mode 100644
index e6692e4fc3..0000000000
--- a/src/liveness.zig
+++ /dev/null
@@ -1,254 +0,0 @@
-const std = @import("std");
-const ir = @import("air.zig");
-const trace = @import("tracy.zig").trace;
-const log = std.log.scoped(.liveness);
-const assert = std.debug.assert;
-
-/// Perform Liveness Analysis over the `Body`. Each `Inst` will have its `deaths` field populated.
-pub fn analyze(
- /// Used for temporary storage during the analysis.
- gpa: *std.mem.Allocator,
- /// Used to tack on extra allocations in the same lifetime as the existing instructions.
- arena: *std.mem.Allocator,
- body: ir.Body,
-) error{OutOfMemory}!void {
- const tracy = trace(@src());
- defer tracy.end();
-
- var table = std.AutoHashMap(*ir.Inst, void).init(gpa);
- defer table.deinit();
- try table.ensureCapacity(@intCast(u32, body.instructions.len));
- try analyzeWithTable(arena, &table, null, body);
-}
-
-fn analyzeWithTable(
- arena: *std.mem.Allocator,
- table: *std.AutoHashMap(*ir.Inst, void),
- new_set: ?*std.AutoHashMap(*ir.Inst, void),
- body: ir.Body,
-) error{OutOfMemory}!void {
- var i: usize = body.instructions.len;
-
- if (new_set) |ns| {
- // We are only interested in doing this for instructions which are born
- // before a conditional branch, so after obtaining the new set for
- // each branch we prune the instructions which were born within.
- while (i != 0) {
- i -= 1;
- const base = body.instructions[i];
- _ = ns.remove(base);
- try analyzeInst(arena, table, new_set, base);
- }
- } else {
- while (i != 0) {
- i -= 1;
- const base = body.instructions[i];
- try analyzeInst(arena, table, new_set, base);
- }
- }
-}
-
-fn analyzeInst(
- arena: *std.mem.Allocator,
- table: *std.AutoHashMap(*ir.Inst, void),
- new_set: ?*std.AutoHashMap(*ir.Inst, void),
- base: *ir.Inst,
-) error{OutOfMemory}!void {
- if (table.contains(base)) {
- base.deaths = 0;
- } else {
- // No tombstone for this instruction means it is never referenced,
- // and its birth marks its own death. Very metal 🤘
- base.deaths = 1 << ir.Inst.unreferenced_bit_index;
- }
-
- switch (base.tag) {
- .constant => return,
- .block => {
- const inst = base.castTag(.block).?;
- try analyzeWithTable(arena, table, new_set, inst.body);
- // We let this continue so that it can possibly mark the block as
- // unreferenced below.
- },
- .loop => {
- const inst = base.castTag(.loop).?;
- try analyzeWithTable(arena, table, new_set, inst.body);
- return; // Loop has no operands and it is always unreferenced.
- },
- .condbr => {
- const inst = base.castTag(.condbr).?;
-
- // Each death that occurs inside one branch, but not the other, needs
- // to be added as a death immediately upon entering the other branch.
-
- var then_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
- defer then_table.deinit();
- try analyzeWithTable(arena, table, &then_table, inst.then_body);
-
- // Reset the table back to its state from before the branch.
- {
- var it = then_table.keyIterator();
- while (it.next()) |key| {
- assert(table.remove(key.*));
- }
- }
-
- var else_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
- defer else_table.deinit();
- try analyzeWithTable(arena, table, &else_table, inst.else_body);
-
- var then_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
- defer then_entry_deaths.deinit();
- var else_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
- defer else_entry_deaths.deinit();
-
- {
- var it = else_table.keyIterator();
- while (it.next()) |key| {
- const else_death = key.*;
- if (!then_table.contains(else_death)) {
- try then_entry_deaths.append(else_death);
- }
- }
- }
- // This loop is the same, except it's for the then branch, and it additionally
- // has to put its items back into the table to undo the reset.
- {
- var it = then_table.keyIterator();
- while (it.next()) |key| {
- const then_death = key.*;
- if (!else_table.contains(then_death)) {
- try else_entry_deaths.append(then_death);
- }
- try table.put(then_death, {});
- }
- }
- // Now we have to correctly populate new_set.
- if (new_set) |ns| {
- try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count()));
- var it = then_table.keyIterator();
- while (it.next()) |key| {
- _ = ns.putAssumeCapacity(key.*, {});
- }
- it = else_table.keyIterator();
- while (it.next()) |key| {
- _ = ns.putAssumeCapacity(key.*, {});
- }
- }
- inst.then_death_count = std.math.cast(@TypeOf(inst.then_death_count), then_entry_deaths.items.len) catch return error.OutOfMemory;
- inst.else_death_count = std.math.cast(@TypeOf(inst.else_death_count), else_entry_deaths.items.len) catch return error.OutOfMemory;
- const allocated_slice = try arena.alloc(*ir.Inst, then_entry_deaths.items.len + else_entry_deaths.items.len);
- inst.deaths = allocated_slice.ptr;
- std.mem.copy(*ir.Inst, inst.thenDeaths(), then_entry_deaths.items);
- std.mem.copy(*ir.Inst, inst.elseDeaths(), else_entry_deaths.items);
-
- // Continue on with the instruction analysis. The following code will find the condition
- // instruction, and the deaths flag for the CondBr instruction will indicate whether the
- // condition's lifetime ends immediately before entering any branch.
- },
- .switchbr => {
- const inst = base.castTag(.switchbr).?;
-
- const Table = std.AutoHashMap(*ir.Inst, void);
- const case_tables = try table.allocator.alloc(Table, inst.cases.len + 1); // +1 for else
- defer table.allocator.free(case_tables);
-
- std.mem.set(Table, case_tables, Table.init(table.allocator));
- defer for (case_tables) |*ct| ct.deinit();
-
- for (inst.cases) |case, i| {
- try analyzeWithTable(arena, table, &case_tables[i], case.body);
-
- // Reset the table back to its state from before the case.
- var it = case_tables[i].keyIterator();
- while (it.next()) |key| {
- assert(table.remove(key.*));
- }
- }
- { // else
- try analyzeWithTable(arena, table, &case_tables[case_tables.len - 1], inst.else_body);
-
- // Reset the table back to its state from before the case.
- var it = case_tables[case_tables.len - 1].keyIterator();
- while (it.next()) |key| {
- assert(table.remove(key.*));
- }
- }
-
- const List = std.ArrayList(*ir.Inst);
- const case_deaths = try table.allocator.alloc(List, case_tables.len); // +1 for else
- defer table.allocator.free(case_deaths);
-
- std.mem.set(List, case_deaths, List.init(table.allocator));
- defer for (case_deaths) |*cd| cd.deinit();
-
- var total_deaths: u32 = 0;
- for (case_tables) |*ct, i| {
- total_deaths += ct.count();
- var it = ct.keyIterator();
- while (it.next()) |key| {
- const case_death = key.*;
- for (case_tables) |*ct_inner, j| {
- if (i == j) continue;
- if (!ct_inner.contains(case_death)) {
- // instruction is not referenced in this case
- try case_deaths[j].append(case_death);
- }
- }
- // undo resetting the table
- try table.put(case_death, {});
- }
- }
-
- // Now we have to correctly populate new_set.
- if (new_set) |ns| {
- try ns.ensureCapacity(@intCast(u32, ns.count() + total_deaths));
- for (case_tables) |*ct| {
- var it = ct.keyIterator();
- while (it.next()) |key| {
- _ = ns.putAssumeCapacity(key.*, {});
- }
- }
- }
-
- total_deaths = 0;
- for (case_deaths[0 .. case_deaths.len - 1]) |*ct, i| {
- inst.cases[i].index = total_deaths;
- const len = std.math.cast(@TypeOf(inst.else_deaths), ct.items.len) catch return error.OutOfMemory;
- inst.cases[i].deaths = len;
- total_deaths += len;
- }
- { // else
- const else_deaths = std.math.cast(@TypeOf(inst.else_deaths), case_deaths[case_deaths.len - 1].items.len) catch return error.OutOfMemory;
- inst.else_index = total_deaths;
- inst.else_deaths = else_deaths;
- total_deaths += else_deaths;
- }
-
- const allocated_slice = try arena.alloc(*ir.Inst, total_deaths);
- inst.deaths = allocated_slice.ptr;
- for (case_deaths[0 .. case_deaths.len - 1]) |*cd, i| {
- std.mem.copy(*ir.Inst, inst.caseDeaths(i), cd.items);
- }
- std.mem.copy(*ir.Inst, inst.elseDeaths(), case_deaths[case_deaths.len - 1].items);
- },
- else => {},
- }
-
- const needed_bits = base.operandCount();
- if (needed_bits <= ir.Inst.deaths_bits) {
- var bit_i: ir.Inst.DeathsBitIndex = 0;
- while (base.getOperand(bit_i)) |operand| : (bit_i += 1) {
- const prev = try table.fetchPut(operand, {});
- if (prev == null) {
- // Death.
- base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
- if (new_set) |ns| try ns.putNoClobber(operand, {});
- }
- }
- } else {
- @panic("Handle liveness analysis for instructions with many parameters");
- }
-
- log.debug("analyze {}: 0b{b}\n", .{ base.tag, base.deaths });
-}