author    Andrew Kelley <andrew@ziglang.org>    2023-04-21 13:32:25 -0700
committer GitHub <noreply@github.com>    2023-04-21 13:32:25 -0700
commit    528b66f6ec9cfb140abff3dc0c4735c179520f42 (patch)
tree      6fc4f164a93a6bd3c2c3467457aedce6fa7959b3 /src/arch/x86_64/CodeGen.zig
parent    391663e497f1871f6bddcf9cbc500710aa9aac4d (diff)
parent    b3f9fe6d0439bcbb5c6baa77c0646c4da2e06dd7 (diff)
Merge pull request #15355 from mlugg/feat/liveness-control-flow
Liveness: control flow analysis and other goodies
Diffstat (limited to 'src/arch/x86_64/CodeGen.zig')
-rw-r--r--    src/arch/x86_64/CodeGen.zig    3160
1 file changed, 1430 insertions(+), 1730 deletions(-)
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 0a59389839..1c72e2296b 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -79,14 +79,8 @@ end_di_column: u32,
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
-/// Whenever there is a runtime branch, we push a Branch onto this stack,
-/// and pop it off when the runtime branch joins. This provides an "overlay"
-/// of the table of mappings from instructions to `MCValue` from within the branch.
-/// This way we can modify the `MCValue` for an instruction in different ways
-/// within different branches. Special consideration is needed when a branch
-/// joins with its parent, to make sure all instructions have the same MCValue
-/// across each runtime branch upon joining.
-branch_stack: *std.ArrayList(Branch),
+const_tracking: InstTrackingMap = .{},
+inst_tracking: InstTrackingMap = .{},
// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
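
[Illustration, not part of the patch: a minimal hedged sketch of the new scheme, with simplified types and hypothetical names rather than the real backend API. Instead of pushing and popping per-branch overlay tables, every live instruction owns a single entry in one flat array hash map, and a branch mutates that entry in place.]

const std = @import("std");

// Stand-in for the real InstTracking defined later in this diff.
const Tracking = struct {
    long: u32, // location that survives control-flow joins (e.g. a stack slot)
    short: u32, // location valid in the current scope (e.g. a register)
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var inst_tracking = std.AutoArrayHashMapUnmanaged(u32, Tracking){};
    defer inst_tracking.deinit(gpa);

    // One entry per AIR instruction; a branch updates `short` in place and
    // the join reconciles it against `long` instead of popping an overlay.
    try inst_tracking.put(gpa, 42, .{ .long = 8, .short = 1 });
    std.debug.print("{any}\n", .{inst_tracking.get(42)});
}
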
@@ -95,6 +89,9 @@ register_manager: RegisterManager = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
+/// Generation of the current scope, increments by 1 for every entered scope.
+scope_generation: u32 = 0,
+
/// Offset from the stack base, representing the end of the stack frame.
max_end_stack: u32 = 0,
/// Represents the current end stack offset. If there is no existing slot
@@ -105,10 +102,12 @@ next_stack_offset: u32 = 0,
air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init,
/// For mir debug info, maps a mir index to an air index
-mir_to_air_map: if (builtin.mode == .Debug) std.AutoHashMap(Mir.Inst.Index, Air.Inst.Index) else void,
+mir_to_air_map: @TypeOf(mir_to_air_map_init) = mir_to_air_map_init,
const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};
+const mir_to_air_map_init = if (builtin.mode == .Debug) std.AutoHashMapUnmanaged(Mir.Inst.Index, Air.Inst.Index){} else {};
+
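+// [Illustration, not part of the patch: a hedged aside on the
+// `mir_to_air_map` change above. Conditioning the initializer's type on
+// `builtin.mode` makes the field zero-sized outside Debug builds, so release
+// binaries pay nothing for the debug-only map. Self-contained sketch of the
+// idiom, with hypothetical names:
+//
+//     const builtin = @import("builtin");
+//     const std = @import("std");
+//
+//     // In release modes the initializer is `{}`, of type void, so the
+//     // field occupies no space; uses must be guarded by the same check.
+//     const debug_map_init = if (builtin.mode == .Debug)
+//         std.AutoHashMapUnmanaged(u32, u32){}
+//     else {};
+//
+//     const Example = struct {
+//         debug_map: @TypeOf(debug_map_init) = debug_map_init,
+//     };
+// ]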
pub const MCValue = union(enum) {
/// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
/// TODO Look into deleting this tag and using `dead` instead, since every use
@@ -117,7 +116,8 @@ pub const MCValue = union(enum) {
/// Control flow will not allow this value to be observed.
unreach,
/// No more references to this value remain.
- dead,
+ /// The payload is the value of scope_generation at the point where the death occurred.
+ dead: u32,
/// The value is undefined.
undef,
/// A pointer-sized integer that fits in a register.
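
[Illustration, not part of the patch: a hedged toy model, with simplified types, of what the new `dead` payload buys. Recording the scope generation at the moment of death lets a later join distinguish deaths that happened inside the branch being closed from deaths that predate it, and undo only the former.]

const std = @import("std");

const Mcv = union(enum) { dead: u32, register: u8 };

pub fn main() void {
    var scope_generation: u32 = 0;
    const long: Mcv = .{ .register = 3 };
    var short: Mcv = long;

    const saved_generation = scope_generation; // state captured before the branch
    scope_generation += 1; // entering the branch bumps the generation
    short = .{ .dead = scope_generation }; // the value dies inside the branch

    // Closing the branch: deaths newer than the saved state are undone.
    switch (short) {
        .dead => |gen| if (gen >= saved_generation) {
            short = long;
        },
        .register => {},
    }
    std.debug.print("{any}\n", .{short}); // back to .{ .register = 3 }
}
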
@@ -183,47 +183,95 @@ pub const MCValue = union(enum) {
}
};
-const Branch = struct {
- inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
+const InstTrackingMap = std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InstTracking);
+const InstTracking = struct {
+ long: MCValue,
+ short: MCValue,
- fn deinit(self: *Branch, gpa: Allocator) void {
- self.inst_table.deinit(gpa);
- self.* = undefined;
+ fn init(result: MCValue) InstTracking {
+ return .{ .long = result, .short = result };
}
- const FormatContext = struct {
- insts: []const Air.Inst.Index,
- mcvs: []const MCValue,
- };
+ fn getReg(self: InstTracking) ?Register {
+ return switch (self.short) {
+ .register => |reg| reg,
+ .register_overflow => |ro| ro.reg,
+ else => null,
+ };
+ }
- fn fmt(
- ctx: FormatContext,
- comptime unused_format_string: []const u8,
- options: std.fmt.FormatOptions,
- writer: anytype,
- ) @TypeOf(writer).Error!void {
- _ = options;
- comptime assert(unused_format_string.len == 0);
- try writer.writeAll("Branch {\n");
- for (ctx.insts, ctx.mcvs) |inst, mcv| {
- try writer.print(" %{d} => {}\n", .{ inst, mcv });
+ fn getCondition(self: InstTracking) ?Condition {
+ return switch (self.short) {
+ .eflags => |eflags| eflags,
+ .register_overflow => |ro| ro.eflags,
+ else => null,
+ };
+ }
+
+ fn spill(self: *InstTracking, function: *Self, inst: Air.Inst.Index) !void {
+ switch (self.long) {
+ .none,
+ .dead,
+ .unreach,
+ => unreachable,
+ .register,
+ .register_overflow,
+ .eflags,
+ => self.long = try function.allocRegOrMem(inst, self.short == .eflags),
+ .stack_offset => {},
+ .undef,
+ .immediate,
+ .memory,
+ .load_direct,
+ .lea_direct,
+ .load_got,
+ .load_tlv,
+ .lea_tlv,
+ .ptr_stack_offset,
+ => return, // these can be rematerialized without using a stack slot
+ }
+ log.debug("spilling %{d} from {} to {}", .{ inst, self.short, self.long });
+ const ty = function.air.typeOfIndex(inst);
+ try function.setRegOrMem(ty, self.long, self.short);
+ }
+
+ fn trackSpill(self: *InstTracking, function: *Self) void {
+ if (self.getReg()) |reg| function.register_manager.freeReg(reg);
+ switch (self.short) {
+ .none, .dead, .unreach => unreachable,
+ else => {},
}
- try writer.writeAll("}");
+ self.short = self.long;
}
- fn format(branch: Branch, comptime unused_format_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
- _ = branch;
- _ = unused_format_string;
- _ = options;
- _ = writer;
- @compileError("do not format Branch directly; use ty.fmtDebug()");
+ fn materialize(self: *InstTracking, function: *Self, inst: Air.Inst.Index, reg: Register) !void {
+ const ty = function.air.typeOfIndex(inst);
+ try function.genSetReg(ty, reg, self.long);
}
- fn fmtDebug(self: @This()) std.fmt.Formatter(fmt) {
- return .{ .data = .{
- .insts = self.inst_table.keys(),
- .mcvs = self.inst_table.values(),
- } };
+ fn trackMaterialize(self: *InstTracking, function: *Self, inst: Air.Inst.Index, reg: Register) void {
+ assert(inst == function.register_manager.registers[
+ RegisterManager.indexOfRegIntoTracked(reg).?
+ ]);
+ self.short = .{ .register = reg };
+ }
+
+ fn resurrect(self: *InstTracking, scope_generation: u32) void {
+ switch (self.short) {
+ .dead => |die_generation| if (die_generation >= scope_generation) {
+ self.short = self.long;
+ },
+ else => {},
+ }
+ }
+
+ fn die(self: *InstTracking, function: *Self) void {
+ function.freeValue(self.short);
+ self.reuse(function);
+ }
+
+ fn reuse(self: *InstTracking, function: *Self) void {
+ self.short = .{ .dead = function.scope_generation };
}
};
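
[Illustration, not part of the patch: to make the long/short split concrete, a heavily simplified stand-alone model of the spill and materialize steps. This is a hedged sketch — the real methods above also emit MIR moves and coordinate with the register manager.]

const std = @import("std");

const Mcv = union(enum) { register: u8, stack_offset: i32 };

const Tracking = struct {
    long: Mcv,
    short: Mcv,

    fn spill(self: *Tracking, slot: i32) void {
        // The first spill allocates the canonical stack slot; later spills
        // reuse it, so `long` only changes while it is still a register.
        switch (self.long) {
            .register => self.long = .{ .stack_offset = slot },
            .stack_offset => {},
        }
        self.short = self.long; // trackSpill: the register is given up
    }

    fn materialize(self: *Tracking, reg: u8) void {
        // The real code emits a load from `long` into `reg` here.
        self.short = .{ .register = reg };
    }
};

pub fn main() void {
    var t = Tracking{ .long = .{ .register = 1 }, .short = .{ .register = 1 } };
    t.spill(-8); // value now canonically lives in the stack slot
    t.materialize(2); // reloaded into a fresh register; the slot stays valid
    std.debug.print("long={any} short={any}\n", .{ t.long, t.short });
}
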
@@ -235,39 +283,14 @@ const StackAllocation = struct {
const BlockData = struct {
relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
- branch: Branch = .{},
- branch_depth: u32,
+ state: State,
fn deinit(self: *BlockData, gpa: Allocator) void {
- self.branch.deinit(gpa);
self.relocs.deinit(gpa);
self.* = undefined;
}
};
-const BigTomb = struct {
- function: *Self,
- inst: Air.Inst.Index,
- lbt: Liveness.BigTomb,
-
- fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
- const dies = bt.lbt.feed();
- const op_index = Air.refToIndex(op_ref) orelse return;
- if (!dies) return;
- bt.function.processDeath(op_index);
- }
-
- fn finishAir(bt: *BigTomb, result: MCValue) void {
- const is_used = !bt.function.liveness.isUnused(bt.inst);
- if (is_used) {
- log.debug(" (saving %{d} => {})", .{ bt.inst, result });
- const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result);
- }
- bt.function.finishAirBookkeeping();
- }
-};
-
const Self = @This();
pub fn generate(
@@ -294,19 +317,9 @@ pub fn generate(
stderr.writeAll(":\n") catch {};
}
- var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
- try branch_stack.ensureUnusedCapacity(2);
- // The outermost branch is used for constants only.
- branch_stack.appendAssumeCapacity(.{});
- branch_stack.appendAssumeCapacity(.{});
- defer {
- assert(branch_stack.items.len == 2);
- for (branch_stack.items) |*branch| branch.deinit(bin_file.allocator);
- branch_stack.deinit();
- }
-
+ const gpa = bin_file.allocator;
var function = Self{
- .gpa = bin_file.allocator,
+ .gpa = gpa,
.air = air,
.liveness = liveness,
.target = &bin_file.options.target,
@@ -318,21 +331,23 @@ pub fn generate(
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = fn_type,
.arg_index = 0,
- .branch_stack = &branch_stack,
.src_loc = src_loc,
.stack_align = undefined,
.end_di_line = module_fn.rbrace_line,
.end_di_column = module_fn.rbrace_column,
- .mir_to_air_map = if (builtin.mode == .Debug)
- std.AutoHashMap(Mir.Inst.Index, Air.Inst.Index).init(bin_file.allocator)
- else {},
};
- defer function.stack.deinit(bin_file.allocator);
- defer function.blocks.deinit(bin_file.allocator);
- defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
- defer function.mir_instructions.deinit(bin_file.allocator);
- defer function.mir_extra.deinit(bin_file.allocator);
- defer if (builtin.mode == .Debug) function.mir_to_air_map.deinit();
+ defer {
+ function.stack.deinit(gpa);
+ var block_it = function.blocks.valueIterator();
+ while (block_it.next()) |block| block.deinit(gpa);
+ function.blocks.deinit(gpa);
+ function.inst_tracking.deinit(gpa);
+ function.const_tracking.deinit(gpa);
+ function.exitlude_jump_relocs.deinit(gpa);
+ function.mir_instructions.deinit(gpa);
+ function.mir_extra.deinit(gpa);
+ if (builtin.mode == .Debug) function.mir_to_air_map.deinit(gpa);
+ }
var call_info = function.resolveCallingConventionValues(fn_type, &.{}) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
@@ -905,11 +920,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const air_tags = self.air.instructions.items(.tag);
for (body) |inst| {
- const old_air_bookkeeping = self.air_bookkeeping;
- try self.ensureProcessDeathCapacity(Liveness.bpi);
if (builtin.mode == .Debug) {
- try self.mir_to_air_map.put(@intCast(Mir.Inst.Index, self.mir_instructions.len), inst);
+ const mir_inst = @intCast(Mir.Inst.Index, self.mir_instructions.len);
+ try self.mir_to_air_map.put(self.gpa, mir_inst, inst);
}
+
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) continue;
if (debug_wip_mir) @import("../../print_air.zig").dumpInst(
inst,
self.bin_file.options.module.?,
@@ -917,6 +933,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
self.liveness,
);
+ const old_air_bookkeeping = self.air_bookkeeping;
+ try self.inst_tracking.ensureUnusedCapacity(self.gpa, 1);
switch (air_tags[inst]) {
// zig fmt: off
.not,
@@ -1080,7 +1098,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.field_parent_ptr => try self.airFieldParentPtr(inst),
- .switch_br => try self.airSwitch(inst),
+ .switch_br => try self.airSwitchBr(inst),
.slice_ptr => try self.airSlicePtr(inst),
.slice_len => try self.airSliceLen(inst),
@@ -1166,8 +1184,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
var it = self.register_manager.free_registers.iterator(.{ .kind = .unset });
while (it.next()) |index| {
const tracked_inst = self.register_manager.registers[index];
- const tracked_mcv = self.getResolvedInstValue(tracked_inst).?.*;
- assert(RegisterManager.indexOfRegIntoTracked(switch (tracked_mcv) {
+ const tracking = self.getResolvedInstValue(tracked_inst);
+ assert(RegisterManager.indexOfRegIntoTracked(switch (tracking.short) {
.register => |reg| reg,
.register_overflow => |ro| ro.reg,
else => unreachable,
@@ -1205,16 +1223,16 @@ fn freeValue(self: *Self, value: MCValue) void {
}
}
+fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void {
+ if (bt.feed()) if (Air.refToIndex(operand)) |inst| self.processDeath(inst);
+}
+
/// Asserts the instruction is tracked in `inst_tracking`.
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
const air_tags = self.air.instructions.items(.tag);
- if (air_tags[inst] == .constant) return; // Constants are immortal.
- const prev_value = (self.getResolvedInstValue(inst) orelse return).*;
+ if (air_tags[inst] == .constant) return;
log.debug("%{d} => {}", .{ inst, MCValue.dead });
- // When editing this function, note that the logic must synchronize with `reuseOperand`.
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacity(inst, .dead);
- self.freeValue(prev_value);
+ self.inst_tracking.getPtr(inst).?.die(self);
}
/// Called when there are no operands, and the instruction is always unreferenced.
@@ -1224,6 +1242,21 @@ fn finishAirBookkeeping(self: *Self) void {
}
}
+fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void {
+ if (self.liveness.isUnused(inst)) switch (result) {
+ .none, .dead, .unreach => {},
+ else => unreachable, // Why didn't the result die?
+ } else {
+ log.debug("%{d} => {}", .{ inst, result });
+ self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result));
+ // In some cases, an operand may be reused as the result.
+ // If that operand died and was a register, it was freed by
+ // processDeath, so we have to "re-allocate" the register.
+ self.getValue(result, inst);
+ }
+ self.finishAirBookkeeping();
+}
+
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
var tomb_bits = self.liveness.getTombBits(inst);
for (operands) |op| {
@@ -1235,26 +1268,7 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
self.processDeath(op_index);
}
- const is_used = @truncate(u1, tomb_bits) == 0;
- if (is_used) {
- log.debug("%{d} => {}", .{ inst, result });
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacityNoClobber(inst, result);
- // In some cases, an operand may be reused as the result.
- // If that operand died and was a register, it was freed by
- // processDeath, so we have to "re-allocate" the register.
- self.getValue(result, inst);
- } else switch (result) {
- .none, .dead, .unreach => {},
- else => unreachable, // Why didn't the result die?
- }
- self.finishAirBookkeeping();
-}
-
-fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
- // In addition to the caller's needs, we need enough space to spill every register and eflags.
- const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table;
- try table.ensureUnusedCapacity(self.gpa, additional_count + self.register_manager.registers.len + 1);
+ self.finishAirResult(inst, result);
}
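
[Illustration, not part of the patch: a hedged sketch of the tomb-bit decoding that `finishAir` performs above, assuming Liveness.bpi == 4 (three operand-death bits followed by one bit marking the result itself unused); the decoder is a hypothetical stand-in, not the Liveness API.]

const std = @import("std");

fn decode(tomb_bits: u4) void {
    var bits = tomb_bits;
    var op: u32 = 0;
    while (op < 3) : (op += 1) {
        const dies = @truncate(u1, bits) != 0;
        bits >>= 1;
        if (dies) std.debug.print("operand {d} dies here\n", .{op});
    }
    std.debug.print("result unused: {}\n", .{@truncate(u1, bits) != 0});
}

pub fn main() void {
    decode(0b1010); // operand 1 dies; the result itself is unused
}
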
fn allocMem(self: *Self, inst: ?Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
@@ -1339,66 +1353,115 @@ fn allocRegOrMemAdvanced(self: *Self, elem_ty: Type, inst: ?Air.Inst.Index, reg_
}
const State = struct {
- registers: abi.RegisterManager.TrackedRegisters,
- free_registers: abi.RegisterManager.RegisterBitSet,
- eflags_inst: ?Air.Inst.Index,
+ registers: RegisterManager.TrackedRegisters,
+ free_registers: RegisterManager.RegisterBitSet,
+ inst_tracking_len: u32,
+ scope_generation: u32,
};
-fn captureState(self: *Self) State {
- return State{
- .registers = self.register_manager.registers,
- .free_registers = self.register_manager.free_registers,
- .eflags_inst = self.eflags_inst,
- };
+fn initRetroactiveState(self: *Self) State {
+ var state: State = undefined;
+ state.inst_tracking_len = @intCast(u32, self.inst_tracking.count());
+ state.scope_generation = self.scope_generation;
+ return state;
}
-fn revertState(self: *Self, state: State) void {
- self.eflags_inst = state.eflags_inst;
- self.register_manager.free_registers = state.free_registers;
- self.register_manager.registers = state.registers;
+fn saveRetroactiveState(self: *Self, state: *State) !void {
+ try self.spillEflagsIfOccupied();
+ state.registers = self.register_manager.registers;
+ state.free_registers = self.register_manager.free_registers;
}
-pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
- const stack_mcv = try self.allocRegOrMem(inst, false);
- log.debug("spilling %{d} to stack mcv {any}", .{ inst, stack_mcv });
- const reg_mcv = self.getResolvedInstValue(inst).?.*;
- switch (reg_mcv) {
- .register => |other| {
- assert(reg.to64() == other.to64());
- },
- .register_overflow => |ro| {
- assert(reg.to64() == ro.reg.to64());
- },
- else => {},
- }
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacity(inst, stack_mcv);
- try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv, .{});
+fn saveState(self: *Self) !State {
+ var state = self.initRetroactiveState();
+ try self.saveRetroactiveState(&state);
+ return state;
}
-pub fn spillEflagsIfOccupied(self: *Self) !void {
- if (self.eflags_inst) |inst_to_save| {
- const mcv = self.getResolvedInstValue(inst_to_save).?.*;
- const new_mcv = switch (mcv) {
- .register_overflow => try self.allocRegOrMem(inst_to_save, false),
- .eflags => try self.allocRegOrMem(inst_to_save, true),
- else => unreachable,
- };
-
- try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
- log.debug("spilling %{d} to mcv {any}", .{ inst_to_save, new_mcv });
+fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, comptime opts: struct {
+ emit_instructions: bool,
+ update_tracking: bool,
+ resurrect: bool,
+ close_scope: bool,
+}) !void {
+ if (opts.close_scope) {
+ for (self.inst_tracking.values()[state.inst_tracking_len..]) |*tracking| tracking.die(self);
+ self.inst_tracking.shrinkRetainingCapacity(state.inst_tracking_len);
+ }
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacity(inst_to_save, new_mcv);
+ if (opts.resurrect) for (self.inst_tracking.values()[0..state.inst_tracking_len]) |*tracking|
+ tracking.resurrect(state.scope_generation);
+ const air_tags = self.air.instructions.items(.tag);
+ for (deaths) |death| switch (air_tags[death]) {
+ .constant => {},
+ .const_ty => unreachable,
+ else => self.inst_tracking.getPtr(death).?.die(self),
+ };
+ for (0..state.registers.len) |index| {
+ const current_maybe_inst = if (self.register_manager.free_registers.isSet(index))
+ null
+ else
+ self.register_manager.registers[index];
+ const target_maybe_inst = if (state.free_registers.isSet(index))
+ null
+ else
+ state.registers[index];
+ if (std.debug.runtime_safety) if (target_maybe_inst) |target_inst|
+ assert(self.inst_tracking.getIndex(target_inst).? < state.inst_tracking_len);
+ if (current_maybe_inst == target_maybe_inst) continue;
+ const reg = RegisterManager.regAtTrackedIndex(
+ @intCast(RegisterManager.RegisterBitSet.ShiftInt, index),
+ );
+ if (opts.emit_instructions) {
+ if (current_maybe_inst) |current_inst| {
+ try self.inst_tracking.getPtr(current_inst).?.spill(self, current_inst);
+ }
+ if (target_maybe_inst) |target_inst| {
+ try self.inst_tracking.getPtr(target_inst).?.materialize(self, target_inst, reg);
+ }
+ }
+ if (opts.update_tracking) {
+ if (current_maybe_inst) |current_inst| {
+ self.inst_tracking.getPtr(current_inst).?.trackSpill(self);
+ }
+ self.register_manager.freeReg(reg);
+ self.register_manager.getRegAssumeFree(reg, target_maybe_inst);
+ if (target_maybe_inst) |target_inst| {
+ self.inst_tracking.getPtr(target_inst).?.trackMaterialize(self, target_inst, reg);
+ }
+ }
+ }
+ if (opts.emit_instructions) if (self.eflags_inst) |inst|
+ try self.inst_tracking.getPtr(inst).?.spill(self, inst);
+ if (opts.update_tracking) if (self.eflags_inst) |inst| {
self.eflags_inst = null;
+ self.inst_tracking.getPtr(inst).?.trackSpill(self);
+ };
- // TODO consolidate with register manager and spillInstruction
- // this call should really belong in the register manager!
- switch (mcv) {
- .register_overflow => |ro| self.register_manager.freeReg(ro.reg),
- else => {},
- }
+ if (opts.update_tracking and std.debug.runtime_safety) {
+ assert(self.eflags_inst == null);
+ assert(self.register_manager.free_registers.eql(state.free_registers));
+ var used_reg_it = state.free_registers.iterator(.{ .kind = .unset });
+ while (used_reg_it.next()) |index|
+ assert(self.register_manager.registers[index] == state.registers[index]);
+ }
+}
+
+pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
+ const tracking = self.inst_tracking.getPtr(inst).?;
+ assert(tracking.getReg().?.to64() == reg.to64());
+ try tracking.spill(self, inst);
+ tracking.trackSpill(self);
+}
+
+pub fn spillEflagsIfOccupied(self: *Self) !void {
+ if (self.eflags_inst) |inst| {
+ self.eflags_inst = null;
+ const tracking = self.inst_tracking.getPtr(inst).?;
+ assert(tracking.getCondition() != null);
+ try tracking.spill(self, inst);
+ tracking.trackSpill(self);
}
}
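
[Illustration, not part of the patch: a hedged toy model of the reconciliation loop at the heart of `restoreState` above. The real code spills and materializes through `InstTracking` and emits MIR; here that is reduced to prints. Each tracked register's current owner is compared against the owner recorded in the saved state, and mismatches are resolved one register at a time.]

const std = @import("std");

pub fn main() void {
    // null = register is free; otherwise the AIR instruction that owns it.
    var current = [4]?u32{ 5, null, 7, null };
    const target = [4]?u32{ 5, 7, null, null };

    for (target, 0..) |tgt, reg| {
        if (std.meta.eql(current[reg], tgt)) continue;
        if (current[reg]) |inst| std.debug.print("spill %{d} out of r{d}\n", .{ inst, reg });
        if (tgt) |inst| std.debug.print("materialize %{d} into r{d}\n", .{ inst, reg });
        current[reg] = tgt;
    }
}
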
@@ -1442,22 +1505,14 @@ fn copyToRegisterWithInstTracking(self: *Self, reg_owner: Air.Inst.Index, ty: Ty
}
fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
- const stack_offset = try self.allocMemPtr(inst);
- break :result .{ .ptr_stack_offset = @intCast(i32, stack_offset) };
- };
+ const stack_offset = try self.allocMemPtr(inst);
+ const result = MCValue{ .ptr_stack_offset = @intCast(i32, stack_offset) };
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
- const stack_offset = try self.allocMemPtr(inst);
- break :result .{ .ptr_stack_offset = @intCast(i32, stack_offset) };
- };
+ const stack_offset = try self.allocMemPtr(inst);
+ const result = MCValue{ .ptr_stack_offset = @intCast(i32, stack_offset) };
return self.finishAir(inst, result, .{ .none, .none, .none });
}
@@ -1477,126 +1532,125 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_int_info = src_ty.intInfo(self.target.*);
- const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_lock = switch (src_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
-
- const dst_ty = self.air.typeOfIndex(inst);
- const dst_int_info = dst_ty.intInfo(self.target.*);
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
- const dst_mcv = if (dst_abi_size <= src_abi_size and
- self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_mcv
- else
- try self.allocRegOrMem(inst, true);
- const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
- const signedness: std.builtin.Signedness = if (dst_int_info.signedness == .signed and
- src_int_info.signedness == .signed) .signed else .unsigned;
- switch (dst_mcv) {
- .register => |dst_reg| {
- const min_abi_size = @min(dst_abi_size, src_abi_size);
- const tag: Mir.Inst.Tag = switch (signedness) {
- .signed => .movsx,
- .unsigned => if (min_abi_size > 2) .mov else .movzx,
- };
- const dst_alias = switch (tag) {
- .movsx => dst_reg.to64(),
- .mov, .movzx => if (min_abi_size > 4) dst_reg.to64() else dst_reg.to32(),
- else => unreachable,
- };
- switch (src_mcv) {
- .register => |src_reg| {
- try self.asmRegisterRegister(
- tag,
- dst_alias,
- registerAlias(src_reg, min_abi_size),
- );
- },
- .stack_offset => |src_off| {
- try self.asmRegisterMemory(tag, dst_alias, Memory.sib(
- Memory.PtrSize.fromSize(min_abi_size),
- .{ .base = .rbp, .disp = -src_off },
- ));
- },
- else => return self.fail("TODO airIntCast from {s} to {s}", .{
- @tagName(src_mcv),
- @tagName(dst_mcv),
- }),
- }
- if (self.regExtraBits(min_ty) > 0) try self.truncateRegister(min_ty, dst_reg);
- },
- else => {
- try self.setRegOrMem(min_ty, dst_mcv, src_mcv);
- const extra = dst_abi_size * 8 - dst_int_info.bits;
- if (extra > 0) {
- try self.genShiftBinOpMir(switch (signedness) {
- .signed => .sal,
- .unsigned => .shl,
- }, dst_ty, dst_mcv, .{ .immediate = extra });
- try self.genShiftBinOpMir(switch (signedness) {
- .signed => .sar,
- .unsigned => .shr,
- }, dst_ty, dst_mcv, .{ .immediate = extra });
- }
- },
- }
- break :result dst_mcv;
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_int_info = src_ty.intInfo(self.target.*);
+ const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_lock = switch (src_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
};
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const dst_ty = self.air.typeOfIndex(inst);
+ const dst_int_info = dst_ty.intInfo(self.target.*);
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ const dst_mcv = if (dst_abi_size <= src_abi_size and
+ self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_mcv
+ else
+ try self.allocRegOrMem(inst, true);
+
+ const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
+ const signedness: std.builtin.Signedness = if (dst_int_info.signedness == .signed and
+ src_int_info.signedness == .signed) .signed else .unsigned;
+ switch (dst_mcv) {
+ .register => |dst_reg| {
+ const min_abi_size = @min(dst_abi_size, src_abi_size);
+ const tag: Mir.Inst.Tag = switch (signedness) {
+ .signed => .movsx,
+ .unsigned => if (min_abi_size > 2) .mov else .movzx,
+ };
+ const dst_alias = switch (tag) {
+ .movsx => dst_reg.to64(),
+ .mov, .movzx => if (min_abi_size > 4) dst_reg.to64() else dst_reg.to32(),
+ else => unreachable,
+ };
+ switch (src_mcv) {
+ .register => |src_reg| {
+ try self.asmRegisterRegister(
+ tag,
+ dst_alias,
+ registerAlias(src_reg, min_abi_size),
+ );
+ },
+ .stack_offset => |src_off| {
+ try self.asmRegisterMemory(tag, dst_alias, Memory.sib(
+ Memory.PtrSize.fromSize(min_abi_size),
+ .{ .base = .rbp, .disp = -src_off },
+ ));
+ },
+ else => return self.fail("TODO airIntCast from {s} to {s}", .{
+ @tagName(src_mcv),
+ @tagName(dst_mcv),
+ }),
+ }
+ if (self.regExtraBits(min_ty) > 0) try self.truncateRegister(min_ty, dst_reg);
+ },
+ else => {
+ try self.setRegOrMem(min_ty, dst_mcv, src_mcv);
+ const extra = dst_abi_size * 8 - dst_int_info.bits;
+ if (extra > 0) {
+ try self.genShiftBinOpMir(switch (signedness) {
+ .signed => .sal,
+ .unsigned => .shl,
+ }, dst_ty, dst_mcv, .{ .immediate = extra });
+ try self.genShiftBinOpMir(switch (signedness) {
+ .signed => .sar,
+ .unsigned => .shr,
+ }, dst_ty, dst_mcv, .{ .immediate = extra });
+ }
+ },
+ }
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
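
[Illustration, not part of the patch: the tag selection above leans on two x86-64 facts — movzx only has 8- and 16-bit source forms, and any 32-bit mov already zero-extends into the full 64-bit register. A hedged sketch of just that decision, using a hypothetical helper rather than the backend API.]

const std = @import("std");

const Tag = enum { movsx, movzx, mov };

fn extensionTag(signedness: std.builtin.Signedness, min_abi_size: u32) Tag {
    return switch (signedness) {
        .signed => .movsx,
        // No movzx form for 4- or 8-byte sources; plain mov zero-extends.
        .unsigned => if (min_abi_size > 2) .mov else .movzx,
    };
}

test "extension tag selection" {
    try std.testing.expectEqual(Tag.movzx, extensionTag(.unsigned, 1));
    try std.testing.expectEqual(Tag.mov, extensionTag(.unsigned, 4));
    try std.testing.expectEqual(Tag.movsx, extensionTag(.signed, 8));
}
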
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const dst_ty = self.air.typeOfIndex(inst);
- const dst_abi_size = dst_ty.abiSize(self.target.*);
- if (dst_abi_size > 8) {
- return self.fail("TODO implement trunc for abi sizes larger than 8", .{});
- }
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_lock = switch (src_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
-
- const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const dst_abi_size = dst_ty.abiSize(self.target.*);
+ if (dst_abi_size > 8) {
+ return self.fail("TODO implement trunc for abi sizes larger than 8", .{});
+ }
- // when truncating a `u16` to `u5`, for example, those top 3 bits in the result
- // have to be removed. this only happens if the dst if not a power-of-two size.
- if (self.regExtraBits(dst_ty) > 0) try self.truncateRegister(dst_ty, dst_mcv.register.to64());
- break :result dst_mcv;
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_lock = switch (src_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
};
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
+
+ // when truncating a `u16` to `u5`, for example, those top 3 bits in the result
+ // have to be removed. this only happens if the dst is not a power-of-two size.
+ if (self.regExtraBits(dst_ty) > 0) try self.truncateRegister(dst_ty, dst_mcv.register.to64());
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
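
[Illustration, not part of the patch: a worked example of the comment above, with register contents modeled as plain integers. After a u16 -> u5 trunc the value still occupies a full-width register, so the bits above bit 4 must be cleared explicitly.]

const std = @import("std");

pub fn main() void {
    const wide: u16 = 0b0000_0000_1110_1010;
    // What the trunc must produce: only the low five bits survive.
    const narrow = @truncate(u5, wide);
    // The register-level equivalent: mask off everything above bit 4.
    const masked = wide & 0b1_1111;
    std.debug.print("narrow=0b{b} masked=0b{b}\n", .{ narrow, masked });
}
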
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const ty = self.air.typeOfIndex(inst);
+
const operand = try self.resolveInst(un_op);
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand;
- return self.finishAir(inst, result, .{ un_op, .none, .none });
+ const dst_mcv = if (self.reuseOperand(inst, un_op, 0, operand))
+ operand
+ else
+ try self.copyToRegisterWithInstTracking(inst, ty, operand);
+
+ return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
}
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
- }
-
const ptr = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
const len = try self.resolveInst(bin_op.rhs);
@@ -1612,33 +1666,21 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
fn airUnOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-
- const result = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.genUnOp(inst, tag, ty_op.operand);
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const dst_mcv = try self.genUnOp(inst, tag, ty_op.operand);
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-
- const result = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-
- const result = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
@@ -1678,7 +1720,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result = result: {
const tag = self.air.instructions.items(.tag)[inst];
const dst_ty = self.air.typeOfIndex(inst);
if (dst_ty.zigTypeTag() == .Float)
@@ -1709,168 +1751,162 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ty = self.air.typeOf(bin_op.lhs);
+ const ty = self.air.typeOf(bin_op.lhs);
- const lhs_mcv = try self.resolveInst(bin_op.lhs);
- const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
- lhs_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
- const dst_reg = dst_mcv.register;
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ const lhs_mcv = try self.resolveInst(bin_op.lhs);
+ const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
+ lhs_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
+ const dst_reg = dst_mcv.register;
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
- const rhs_mcv = try self.resolveInst(bin_op.rhs);
- const rhs_lock = switch (rhs_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
-
- const limit_reg = try self.register_manager.allocReg(null, gp);
- const limit_mcv = MCValue{ .register = limit_reg };
- const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
- defer self.register_manager.unlockReg(limit_lock);
-
- const reg_bits = self.regBitSize(ty);
- const cc: Condition = if (ty.isSignedInt()) cc: {
- try self.genSetReg(ty, limit_reg, dst_mcv);
- try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
- try self.genBinOpMir(.xor, ty, limit_mcv, .{
- .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
- });
- break :cc .o;
- } else cc: {
- try self.genSetReg(ty, limit_reg, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
- });
- break :cc .c;
- };
- try self.genBinOpMir(.add, ty, dst_mcv, rhs_mcv);
+ const rhs_mcv = try self.resolveInst(bin_op.rhs);
+ const rhs_lock = switch (rhs_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
- try self.asmCmovccRegisterRegister(
- registerAlias(dst_reg, cmov_abi_size),
- registerAlias(limit_reg, cmov_abi_size),
- cc,
- );
- break :result dst_mcv;
+ const limit_reg = try self.register_manager.allocReg(null, gp);
+ const limit_mcv = MCValue{ .register = limit_reg };
+ const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
+ defer self.register_manager.unlockReg(limit_lock);
+
+ const reg_bits = self.regBitSize(ty);
+ const cc: Condition = if (ty.isSignedInt()) cc: {
+ try self.genSetReg(ty, limit_reg, dst_mcv);
+ try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
+ try self.genBinOpMir(.xor, ty, limit_mcv, .{
+ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+ });
+ break :cc .o;
+ } else cc: {
+ try self.genSetReg(ty, limit_reg, .{
+ .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
+ });
+ break :cc .c;
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ try self.genBinOpMir(.add, ty, dst_mcv, rhs_mcv);
+
+ const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+ try self.asmCmovccRegisterRegister(
+ registerAlias(dst_reg, cmov_abi_size),
+ registerAlias(limit_reg, cmov_abi_size),
+ cc,
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
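
[Illustration, not part of the patch: the signed path above computes its saturation limit with an arithmetic-shift-and-xor trick worth unpacking. Broadcasting the sign bit and XORing with maxInt yields minInt for a negative lhs and maxInt otherwise — exactly the value the cmov selects when the add overflows. A hedged scalar model:]

const std = @import("std");

fn saturationLimit(comptime T: type, lhs: T) T {
    const bits = @typeInfo(T).Int.bits;
    const sign_mask = lhs >> (bits - 1); // all ones if negative, else zero
    return sign_mask ^ std.math.maxInt(T);
}

pub fn main() void {
    std.debug.print("{d} {d}\n", .{
        saturationLimit(i8, -100), // -128
        saturationLimit(i8, 100), // 127
    });
}
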
fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ty = self.air.typeOf(bin_op.lhs);
+ const ty = self.air.typeOf(bin_op.lhs);
- const lhs_mcv = try self.resolveInst(bin_op.lhs);
- const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
- lhs_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
- const dst_reg = dst_mcv.register;
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
-
- const rhs_mcv = try self.resolveInst(bin_op.rhs);
- const rhs_lock = switch (rhs_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
-
- const limit_reg = try self.register_manager.allocReg(null, gp);
- const limit_mcv = MCValue{ .register = limit_reg };
- const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
- defer self.register_manager.unlockReg(limit_lock);
-
- const reg_bits = self.regBitSize(ty);
- const cc: Condition = if (ty.isSignedInt()) cc: {
- try self.genSetReg(ty, limit_reg, dst_mcv);
- try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
- try self.genBinOpMir(.xor, ty, limit_mcv, .{
- .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
- });
- break :cc .o;
- } else cc: {
- try self.genSetReg(ty, limit_reg, .{ .immediate = 0 });
- break :cc .c;
- };
- try self.genBinOpMir(.sub, ty, dst_mcv, rhs_mcv);
+ const lhs_mcv = try self.resolveInst(bin_op.lhs);
+ const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
+ lhs_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv);
+ const dst_reg = dst_mcv.register;
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
- try self.asmCmovccRegisterRegister(
- registerAlias(dst_reg, cmov_abi_size),
- registerAlias(limit_reg, cmov_abi_size),
- cc,
- );
- break :result dst_mcv;
+ const rhs_mcv = try self.resolveInst(bin_op.rhs);
+ const rhs_lock = switch (rhs_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const limit_reg = try self.register_manager.allocReg(null, gp);
+ const limit_mcv = MCValue{ .register = limit_reg };
+ const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
+ defer self.register_manager.unlockReg(limit_lock);
+
+ const reg_bits = self.regBitSize(ty);
+ const cc: Condition = if (ty.isSignedInt()) cc: {
+ try self.genSetReg(ty, limit_reg, dst_mcv);
+ try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
+ try self.genBinOpMir(.xor, ty, limit_mcv, .{
+ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+ });
+ break :cc .o;
+ } else cc: {
+ try self.genSetReg(ty, limit_reg, .{ .immediate = 0 });
+ break :cc .c;
+ };
+ try self.genBinOpMir(.sub, ty, dst_mcv, rhs_mcv);
+
+ const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+ try self.asmCmovccRegisterRegister(
+ registerAlias(dst_reg, cmov_abi_size),
+ registerAlias(limit_reg, cmov_abi_size),
+ cc,
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ty = self.air.typeOf(bin_op.lhs);
+ const ty = self.air.typeOf(bin_op.lhs);
- try self.spillRegisters(&.{ .rax, .rdx });
- const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
- defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
+ try self.spillRegisters(&.{ .rax, .rdx });
+ const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
+ defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
- const lhs_mcv = try self.resolveInst(bin_op.lhs);
- const lhs_lock = switch (lhs_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+ const lhs_mcv = try self.resolveInst(bin_op.lhs);
+ const lhs_lock = switch (lhs_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
- const rhs_mcv = try self.resolveInst(bin_op.rhs);
- const rhs_lock = switch (rhs_mcv) {
- .register => |reg| self.register_manager.lockReg(reg),
- else => null,
- };
- defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
-
- const limit_reg = try self.register_manager.allocReg(null, gp);
- const limit_mcv = MCValue{ .register = limit_reg };
- const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
- defer self.register_manager.unlockReg(limit_lock);
-
- const reg_bits = self.regBitSize(ty);
- const cc: Condition = if (ty.isSignedInt()) cc: {
- try self.genSetReg(ty, limit_reg, lhs_mcv);
- try self.genBinOpMir(.xor, ty, limit_mcv, rhs_mcv);
- try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
- try self.genBinOpMir(.xor, ty, limit_mcv, .{
- .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
- });
- break :cc .o;
- } else cc: {
- try self.genSetReg(ty, limit_reg, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
- });
- break :cc .c;
- };
+ const rhs_mcv = try self.resolveInst(bin_op.rhs);
+ const rhs_lock = switch (rhs_mcv) {
+ .register => |reg| self.register_manager.lockReg(reg),
+ else => null,
+ };
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
- try self.asmCmovccRegisterRegister(
- registerAlias(dst_mcv.register, cmov_abi_size),
- registerAlias(limit_reg, cmov_abi_size),
- cc,
- );
- break :result dst_mcv;
+ const limit_reg = try self.register_manager.allocReg(null, gp);
+ const limit_mcv = MCValue{ .register = limit_reg };
+ const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg);
+ defer self.register_manager.unlockReg(limit_lock);
+
+ const reg_bits = self.regBitSize(ty);
+ const cc: Condition = if (ty.isSignedInt()) cc: {
+ try self.genSetReg(ty, limit_reg, lhs_mcv);
+ try self.genBinOpMir(.xor, ty, limit_mcv, rhs_mcv);
+ try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
+ try self.genBinOpMir(.xor, ty, limit_mcv, .{
+ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+ });
+ break :cc .o;
+ } else cc: {
+ try self.genSetReg(ty, limit_reg, .{
+ .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
+ });
+ break :cc .c;
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+
+ const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
+ const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+ try self.asmCmovccRegisterRegister(
+ registerAlias(dst_mcv.register, cmov_abi_size),
+ registerAlias(limit_reg, cmov_abi_size),
+ cc,
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result: MCValue = result: {
const tag = self.air.instructions.items(.tag)[inst];
const ty = self.air.typeOf(bin_op.lhs);
switch (ty.zigTypeTag()) {
@@ -1929,7 +1965,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result: MCValue = result: {
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
switch (lhs_ty.zigTypeTag()) {
@@ -2051,7 +2087,7 @@ fn genSetStackTruncatedOverflowCompare(
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result: MCValue = result: {
const dst_ty = self.air.typeOf(bin_op.lhs);
switch (dst_ty.zigTypeTag()) {
.Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}),
@@ -2240,10 +2276,6 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
- }
-
try self.spillRegisters(&.{.rcx});
const tag = self.air.instructions.items(.tag)[inst];
@@ -2260,18 +2292,14 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ _ = bin_op;
+ return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .none;
-
const pl_ty = self.air.typeOfIndex(inst);
const opt_mcv = try self.resolveInst(ty_op.operand);
@@ -2296,18 +2324,15 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const dst_ty = self.air.typeOfIndex(inst);
- const opt_mcv = try self.resolveInst(ty_op.operand);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const opt_mcv = try self.resolveInst(ty_op.operand);
- break :result if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
- opt_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
+ opt_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
@@ -2320,7 +2345,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
if (opt_ty.optionalReprIsPayload()) {
break :result if (self.liveness.isUnused(inst))
- .dead
+ .unreach
else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv
else
@@ -2339,16 +2364,13 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
Memory.sib(.byte, .{ .base = dst_mcv.register, .disp = pl_abi_size }),
Immediate.u(1),
);
- break :result if (self.liveness.isUnused(inst)) .dead else dst_mcv;
+ break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
const err_union_ty = self.air.typeOf(ty_op.operand);
const err_ty = err_union_ty.errorUnionSet();
const payload_ty = err_union_ty.errorUnionPayload();
@@ -2391,9 +2413,6 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
const err_union_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
const result = try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, operand);
@@ -2444,72 +2463,68 @@ fn genUnwrapErrorUnionPayloadMir(
// *(E!T) -> E
fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_reg = switch (src_mcv) {
- .register => |reg| reg,
- else => try self.copyToTmpRegister(src_ty, src_mcv),
- };
- const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
- defer self.register_manager.unlockReg(src_lock);
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_reg = switch (src_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(src_ty, src_mcv),
+ };
+ const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
+ defer self.register_manager.unlockReg(src_lock);
- const dst_reg = try self.register_manager.allocReg(inst, gp);
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ const dst_reg = try self.register_manager.allocReg(inst, gp);
+ const dst_mcv = MCValue{ .register = dst_reg };
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
- const eu_ty = src_ty.childType();
- const pl_ty = eu_ty.errorUnionPayload();
- const err_ty = eu_ty.errorUnionSet();
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
- const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
- try self.asmRegisterMemory(
- .mov,
- registerAlias(dst_reg, err_abi_size),
- Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ .base = src_reg, .disp = err_off }),
- );
- break :result .{ .register = dst_reg };
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const eu_ty = src_ty.childType();
+ const pl_ty = eu_ty.errorUnionPayload();
+ const err_ty = eu_ty.errorUnionSet();
+ const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
+ const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
+ try self.asmRegisterMemory(
+ .mov,
+ registerAlias(dst_reg, err_abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ .base = src_reg, .disp = err_off }),
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
// *(E!T) -> *T
fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_reg = switch (src_mcv) {
- .register => |reg| reg,
- else => try self.copyToTmpRegister(src_ty, src_mcv),
- };
- const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
- defer self.register_manager.unlockReg(src_lock);
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_reg = switch (src_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(src_ty, src_mcv),
+ };
+ const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
+ defer self.register_manager.unlockReg(src_lock);
- const dst_ty = self.air.typeOfIndex(inst);
- const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_reg
- else
- try self.register_manager.allocReg(inst, gp);
- const dst_lock = self.register_manager.lockReg(dst_reg);
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_reg
+ else
+ try self.register_manager.allocReg(inst, gp);
+ const dst_mcv = MCValue{ .register = dst_reg };
+ const dst_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const eu_ty = src_ty.childType();
- const pl_ty = eu_ty.errorUnionPayload();
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
- try self.asmRegisterMemory(
- .lea,
- registerAlias(dst_reg, dst_abi_size),
- Memory.sib(.qword, .{ .base = src_reg, .disp = pl_off }),
- );
- break :result .{ .register = dst_reg };
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const eu_ty = src_ty.childType();
+ const pl_ty = eu_ty.errorUnionPayload();
+ const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ try self.asmRegisterMemory(
+ .lea,
+ registerAlias(dst_reg, dst_abi_size),
+ Memory.sib(.qword, .{ .base = src_reg, .disp = pl_off }),
+ );
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
@@ -2535,7 +2550,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
Immediate.u(0),
);
- if (self.liveness.isUnused(inst)) break :result .dead;
+ if (self.liveness.isUnused(inst)) break :result .unreach;
const dst_ty = self.air.typeOfIndex(inst);
const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
@@ -2558,11 +2573,9 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
}
fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement airErrReturnTrace for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ .none, .none, .none });
+ _ = inst;
+ return self.fail("TODO implement airErrReturnTrace for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
@@ -2578,8 +2591,6 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
const pl_ty = self.air.typeOf(ty_op.operand);
if (!pl_ty.hasRuntimeBits()) break :result .{ .immediate = 1 };
@@ -2624,10 +2635,6 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
-
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
@@ -2654,9 +2661,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
+
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
@@ -2682,7 +2687,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result = result: {
const src_mcv = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
@@ -2696,72 +2701,65 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(ty_op.operand);
- const dst_mcv: MCValue = blk: {
- switch (operand) {
- .stack_offset => |off| {
- break :blk MCValue{ .stack_offset = off - 8 };
- },
- else => return self.fail("TODO implement slice_len for {}", .{operand}),
- }
- };
- break :result dst_mcv;
+
+ const operand = try self.resolveInst(ty_op.operand);
+ const dst_mcv: MCValue = blk: {
+ switch (operand) {
+ .stack_offset => |off| {
+ break :blk MCValue{ .stack_offset = off - 8 };
+ },
+ else => return self.fail("TODO implement slice_len for {}", .{operand}),
+ }
};
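+ // Editorial note (not in the original commit): a slice is laid out as { ptr, len },
+ // so the len word sits at byte offset 8 within the spilled value; with this
+ // backend's rbp-relative stack offsets, field offset 8 maps to `off - 8`.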
- log.debug("airSliceLen(%{d}): {}", .{ inst, result });
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_reg = switch (src_mcv) {
- .register => |reg| reg,
- else => try self.copyToTmpRegister(src_ty, src_mcv),
- };
- const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
- defer self.register_manager.unlockReg(src_lock);
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_reg = switch (src_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(src_ty, src_mcv),
+ };
+ const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
+ defer self.register_manager.unlockReg(src_lock);
- const dst_ty = self.air.typeOfIndex(inst);
- const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_reg
- else
- try self.register_manager.allocReg(inst, gp);
- const dst_lock = self.register_manager.lockReg(dst_reg);
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_reg
+ else
+ try self.register_manager.allocReg(inst, gp);
+ const dst_mcv = MCValue{ .register = dst_reg };
+ const dst_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
- try self.asmRegisterMemory(
- .lea,
- registerAlias(dst_reg, dst_abi_size),
- Memory.sib(.qword, .{
- .base = src_reg,
- .disp = @divExact(self.target.cpu.arch.ptrBitWidth(), 8),
- }),
- );
- break :result .{ .register = dst_reg };
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ try self.asmRegisterMemory(
+ .lea,
+ registerAlias(dst_reg, dst_abi_size),
+ Memory.sib(.qword, .{
+ .base = src_reg,
+ .disp = @divExact(self.target.cpu.arch.ptrBitWidth(), 8),
+ }),
+ );
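+ // Editorial note (not in the original commit): the operand is a pointer to a
+ // slice, and a slice is laid out as { ptr, len }, so the len field lives one
+ // pointer width (8 bytes on x86_64) past the base; the lea above computes that.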
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const dst_ty = self.air.typeOfIndex(inst);
- const opt_mcv = try self.resolveInst(ty_op.operand);
+ const dst_ty = self.air.typeOfIndex(inst);
+ const opt_mcv = try self.resolveInst(ty_op.operand);
- break :result if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
- opt_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
+ opt_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register {
@@ -2829,34 +2827,26 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
- const result = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
- const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
- const dst_mcv = try self.allocRegOrMem(inst, false);
- try self.load(dst_mcv, elem_ptr, slice_ptr_field_type);
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
+ const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
+ const dst_mcv = try self.allocRegOrMem(inst, false);
+ try self.load(dst_mcv, elem_ptr, slice_ptr_field_type);
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- try self.genSliceElemPtr(extra.lhs, extra.rhs);
- return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
+ const dst_mcv = try self.genSliceElemPtr(extra.lhs, extra.rhs);
+ return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
}
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
- }
-
const array_ty = self.air.typeOf(bin_op.lhs);
const array = try self.resolveInst(bin_op.lhs);
const array_lock: ?RegisterLock = switch (array) {
@@ -2923,77 +2913,74 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
- const result = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
- // this is identical to the `airPtrElemPtr` codegen expect here an
- // additional `mov` is needed at the end to get the actual value
-
- const elem_ty = ptr_ty.elemType2();
- const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
- const index_ty = self.air.typeOf(bin_op.rhs);
- const index_mcv = try self.resolveInst(bin_op.rhs);
- const index_lock = switch (index_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
- const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size);
- const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
- defer self.register_manager.unlockReg(offset_lock);
+ // this is identical to the `airPtrElemPtr` codegen except that here an
+ // additional `mov` is needed at the end to get the actual value
- const ptr_mcv = try self.resolveInst(bin_op.lhs);
- const elem_ptr_reg = if (ptr_mcv.isRegister() and self.liveness.operandDies(inst, 0))
- ptr_mcv.register
- else
- try self.copyToTmpRegister(ptr_ty, ptr_mcv);
- const elem_ptr_lock = self.register_manager.lockRegAssumeUnused(elem_ptr_reg);
- defer self.register_manager.unlockReg(elem_ptr_lock);
- try self.asmRegisterRegister(.add, elem_ptr_reg, offset_reg);
+ const elem_ty = ptr_ty.elemType2();
+ const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const index_ty = self.air.typeOf(bin_op.rhs);
+ const index_mcv = try self.resolveInst(bin_op.rhs);
+ const index_lock = switch (index_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv = try self.allocRegOrMem(inst, true);
- const dst_lock = switch (dst_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- try self.load(dst_mcv, .{ .register = elem_ptr_reg }, ptr_ty);
- break :result dst_mcv;
+ const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size);
+ const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+ defer self.register_manager.unlockReg(offset_lock);
+
+ const ptr_mcv = try self.resolveInst(bin_op.lhs);
+ const elem_ptr_reg = if (ptr_mcv.isRegister() and self.liveness.operandDies(inst, 0))
+ ptr_mcv.register
+ else
+ try self.copyToTmpRegister(ptr_ty, ptr_mcv);
+ const elem_ptr_lock = self.register_manager.lockRegAssumeUnused(elem_ptr_reg);
+ defer self.register_manager.unlockReg(elem_ptr_lock);
+ try self.asmRegisterRegister(.add, elem_ptr_reg, offset_reg);
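+ // Editorial note (not in the original commit): at this point elem_ptr_reg holds
+ // base + index * elem_abi_size; the load below is the extra step that
+ // distinguishes ptr_elem_val from ptr_elem_ptr.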
+
+ const dst_mcv = try self.allocRegOrMem(inst, true);
+ const dst_lock = switch (dst_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ try self.load(dst_mcv, .{ .register = elem_ptr_reg }, ptr_ty);
+
+ return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const ptr_ty = self.air.typeOf(extra.lhs);
- const ptr = try self.resolveInst(extra.lhs);
- const ptr_lock: ?RegisterLock = switch (ptr) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
+ const ptr_ty = self.air.typeOf(extra.lhs);
+ const ptr = try self.resolveInst(extra.lhs);
+ const ptr_lock: ?RegisterLock = switch (ptr) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_ty = ptr_ty.elemType2();
- const elem_abi_size = elem_ty.abiSize(self.target.*);
- const index_ty = self.air.typeOf(extra.rhs);
- const index = try self.resolveInst(extra.rhs);
- const index_lock: ?RegisterLock = switch (index) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
+ const elem_ty = ptr_ty.elemType2();
+ const elem_abi_size = elem_ty.abiSize(self.target.*);
+ const index_ty = self.air.typeOf(extra.rhs);
+ const index = try self.resolveInst(extra.rhs);
+ const index_lock: ?RegisterLock = switch (index) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
- const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
- const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
- defer self.register_manager.unlockReg(offset_reg_lock);
+ const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
+ const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+ defer self.register_manager.unlockReg(offset_reg_lock);
- const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
- try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
+ const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
+ try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
+
+ return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
}
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
@@ -3035,9 +3022,6 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- }
const tag_ty = self.air.typeOfIndex(inst);
const union_ty = self.air.typeOf(ty_op.operand);
@@ -3089,8 +3073,6 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
const dst_ty = self.air.typeOfIndex(inst);
const src_ty = self.air.typeOf(ty_op.operand);
@@ -3158,8 +3140,6 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
const dst_ty = self.air.typeOfIndex(inst);
const src_ty = self.air.typeOf(ty_op.operand);
const src_bits = src_ty.bitSize(self.target.*);
@@ -3216,8 +3196,6 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
const src_ty = self.air.typeOf(ty_op.operand);
const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -3386,148 +3364,138 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true);
- switch (self.regExtraBits(src_ty)) {
- 0 => {},
- else => |extra| try self.genBinOpMir(
- if (src_ty.isSignedInt()) .sar else .shr,
- src_ty,
- dst_mcv,
- .{ .immediate = extra },
- ),
- }
- break :result dst_mcv;
- };
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_mcv = try self.resolveInst(ty_op.operand);
+
+ const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true);
+ switch (self.regExtraBits(src_ty)) {
+ 0 => {},
+ else => |extra| try self.genBinOpMir(
+ if (src_ty.isSignedInt()) .sar else .shr,
+ src_ty,
+ dst_mcv,
+ .{ .immediate = extra },
+ ),
+ }
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
- const src_ty = self.air.typeOf(ty_op.operand);
- const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
- const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_ty = self.air.typeOf(ty_op.operand);
+ const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+ const src_mcv = try self.resolveInst(ty_op.operand);
- const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
- const dst_reg = dst_mcv.register;
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
+ const dst_reg = dst_mcv.register;
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
- const tmp_reg = try self.register_manager.allocReg(null, gp);
- const tmp_lock = self.register_manager.lockReg(tmp_reg);
- defer if (tmp_lock) |lock| self.register_manager.unlockReg(lock);
+ const tmp_reg = try self.register_manager.allocReg(null, gp);
+ const tmp_lock = self.register_manager.lockReg(tmp_reg);
+ defer if (tmp_lock) |lock| self.register_manager.unlockReg(lock);
- {
- const dst = registerAlias(dst_reg, src_abi_size);
- const tmp = registerAlias(tmp_reg, src_abi_size);
- const imm = if (src_abi_size > 4)
- try self.register_manager.allocReg(null, gp)
- else
- undefined;
+ {
+ const dst = registerAlias(dst_reg, src_abi_size);
+ const tmp = registerAlias(tmp_reg, src_abi_size);
+ const imm = if (src_abi_size > 4)
+ try self.register_manager.allocReg(null, gp)
+ else
+ undefined;
- const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8);
- const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
- const imm_00_11 = Immediate.u(mask / 0b01_01);
- const imm_0_1 = Immediate.u(mask / 0b1_1);
+ const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8);
+ const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
+ const imm_00_11 = Immediate.u(mask / 0b01_01);
+ const imm_0_1 = Immediate.u(mask / 0b1_1);
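+ // Editorial note (not in the original commit): dividing the all-ones mask by a
+ // repeating bit pattern yields the classic SWAR constants, e.g. for
+ // src_abi_size == 1: mask == 0xFF, so
+ //   imm_0000_1111 == 0xFF / 0x11 == 0x0F,
+ //   imm_00_11     == 0xFF / 0x05 == 0x33,
+ //   imm_0_1       == 0xFF / 0x03 == 0x55.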
- // dst = temp1 = bswap(operand)
- try self.asmRegisterRegister(.mov, tmp, dst);
- // tmp = temp1
- try self.asmRegisterImmediate(.shr, dst, Immediate.u(4));
- // dst = temp1 >> 4
- if (src_abi_size > 4) {
- try self.asmRegisterImmediate(.mov, imm, imm_0000_1111);
- try self.asmRegisterRegister(.@"and", tmp, imm);
- try self.asmRegisterRegister(.@"and", dst, imm);
- } else {
- try self.asmRegisterImmediate(.@"and", tmp, imm_0000_1111);
- try self.asmRegisterImmediate(.@"and", dst, imm_0000_1111);
- }
- // tmp = temp1 & 0x0F...0F
- // dst = (temp1 >> 4) & 0x0F...0F
- try self.asmRegisterImmediate(.shl, tmp, Immediate.u(4));
- // tmp = (temp1 & 0x0F...0F) << 4
- try self.asmRegisterRegister(.@"or", dst, tmp);
- // dst = temp2 = ((temp1 >> 4) & 0x0F...0F) | ((temp1 & 0x0F...0F) << 4)
- try self.asmRegisterRegister(.mov, tmp, dst);
- // tmp = temp2
- try self.asmRegisterImmediate(.shr, dst, Immediate.u(2));
- // dst = temp2 >> 2
- if (src_abi_size > 4) {
- try self.asmRegisterImmediate(.mov, imm, imm_00_11);
- try self.asmRegisterRegister(.@"and", tmp, imm);
- try self.asmRegisterRegister(.@"and", dst, imm);
- } else {
- try self.asmRegisterImmediate(.@"and", tmp, imm_00_11);
- try self.asmRegisterImmediate(.@"and", dst, imm_00_11);
- }
- // tmp = temp2 & 0x33...33
- // dst = (temp2 >> 2) & 0x33...33
- try self.asmRegisterMemory(
- .lea,
- if (src_abi_size > 4) tmp.to64() else tmp.to32(),
- Memory.sib(.qword, .{
- .base = dst.to64(),
- .scale_index = .{ .index = tmp.to64(), .scale = 1 << 2 },
- }),
- );
- // tmp = temp3 = ((temp2 >> 2) & 0x33...33) + ((temp2 & 0x33...33) << 2)
- try self.asmRegisterRegister(.mov, dst, tmp);
- // dst = temp3
- try self.asmRegisterImmediate(.shr, tmp, Immediate.u(1));
- // tmp = temp3 >> 1
- if (src_abi_size > 4) {
- try self.asmRegisterImmediate(.mov, imm, imm_0_1);
- try self.asmRegisterRegister(.@"and", dst, imm);
- try self.asmRegisterRegister(.@"and", tmp, imm);
- } else {
- try self.asmRegisterImmediate(.@"and", dst, imm_0_1);
- try self.asmRegisterImmediate(.@"and", tmp, imm_0_1);
- }
- // dst = temp3 & 0x55...55
- // tmp = (temp3 >> 1) & 0x55...55
- try self.asmRegisterMemory(
- .lea,
- if (src_abi_size > 4) dst.to64() else dst.to32(),
- Memory.sib(.qword, .{
- .base = tmp.to64(),
- .scale_index = .{ .index = dst.to64(), .scale = 1 << 1 },
- }),
- );
- // dst = ((temp3 >> 1) & 0x55...55) + ((temp3 & 0x55...55) << 1)
+ // dst = temp1 = bswap(operand)
+ try self.asmRegisterRegister(.mov, tmp, dst);
+ // tmp = temp1
+ try self.asmRegisterImmediate(.shr, dst, Immediate.u(4));
+ // dst = temp1 >> 4
+ if (src_abi_size > 4) {
+ try self.asmRegisterImmediate(.mov, imm, imm_0000_1111);
+ try self.asmRegisterRegister(.@"and", tmp, imm);
+ try self.asmRegisterRegister(.@"and", dst, imm);
+ } else {
+ try self.asmRegisterImmediate(.@"and", tmp, imm_0000_1111);
+ try self.asmRegisterImmediate(.@"and", dst, imm_0000_1111);
}
-
- switch (self.regExtraBits(src_ty)) {
- 0 => {},
- else => |extra| try self.genBinOpMir(
- if (src_ty.isSignedInt()) .sar else .shr,
- src_ty,
- dst_mcv,
- .{ .immediate = extra },
- ),
+ // tmp = temp1 & 0x0F...0F
+ // dst = (temp1 >> 4) & 0x0F...0F
+ try self.asmRegisterImmediate(.shl, tmp, Immediate.u(4));
+ // tmp = (temp1 & 0x0F...0F) << 4
+ try self.asmRegisterRegister(.@"or", dst, tmp);
+ // dst = temp2 = ((temp1 >> 4) & 0x0F...0F) | ((temp1 & 0x0F...0F) << 4)
+ try self.asmRegisterRegister(.mov, tmp, dst);
+ // tmp = temp2
+ try self.asmRegisterImmediate(.shr, dst, Immediate.u(2));
+ // dst = temp2 >> 2
+ if (src_abi_size > 4) {
+ try self.asmRegisterImmediate(.mov, imm, imm_00_11);
+ try self.asmRegisterRegister(.@"and", tmp, imm);
+ try self.asmRegisterRegister(.@"and", dst, imm);
+ } else {
+ try self.asmRegisterImmediate(.@"and", tmp, imm_00_11);
+ try self.asmRegisterImmediate(.@"and", dst, imm_00_11);
}
- break :result dst_mcv;
- };
+ // tmp = temp2 & 0x33...33
+ // dst = (temp2 >> 2) & 0x33...33
+ try self.asmRegisterMemory(
+ .lea,
+ if (src_abi_size > 4) tmp.to64() else tmp.to32(),
+ Memory.sib(.qword, .{
+ .base = dst.to64(),
+ .scale_index = .{ .index = tmp.to64(), .scale = 1 << 2 },
+ }),
+ );
+ // tmp = temp3 = ((temp2 >> 2) & 0x33...33) + ((temp2 & 0x33...33) << 2)
+ try self.asmRegisterRegister(.mov, dst, tmp);
+ // dst = temp3
+ try self.asmRegisterImmediate(.shr, tmp, Immediate.u(1));
+ // tmp = temp3 >> 1
+ if (src_abi_size > 4) {
+ try self.asmRegisterImmediate(.mov, imm, imm_0_1);
+ try self.asmRegisterRegister(.@"and", dst, imm);
+ try self.asmRegisterRegister(.@"and", tmp, imm);
+ } else {
+ try self.asmRegisterImmediate(.@"and", dst, imm_0_1);
+ try self.asmRegisterImmediate(.@"and", tmp, imm_0_1);
+ }
+ // dst = temp3 & 0x55...55
+ // tmp = (temp3 >> 1) & 0x55...55
+ try self.asmRegisterMemory(
+ .lea,
+ if (src_abi_size > 4) dst.to64() else dst.to32(),
+ Memory.sib(.qword, .{
+ .base = tmp.to64(),
+ .scale_index = .{ .index = dst.to64(), .scale = 1 << 1 },
+ }),
+ );
+ // dst = ((temp3 >> 1) & 0x55...55) + ((temp3 & 0x55...55) << 1)
+ }
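+ // Editorial note (not in the original commit): the initial byteSwap reversed the
+ // byte order; the three swap rounds above then reverse the bits within each
+ // byte (nibbles, 2-bit pairs, single bits), so together the whole integer's
+ // bit order is reversed before any extra sign/zero bits are shifted out below.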
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ switch (self.regExtraBits(src_ty)) {
+ 0 => {},
+ else => |extra| try self.genBinOpMir(
+ if (src_ty.isSignedInt()) .sar else .shr,
+ src_ty,
+ dst_mcv,
+ .{ .immediate = extra },
+ ),
+ }
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ un_op, .none, .none });
+ _ = un_op;
+ return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn reuseOperand(
@@ -3559,10 +3527,7 @@ fn reuseOperand(
// Prevent the operand deaths processing code from deallocating it.
self.liveness.clearOperandDeath(inst, op_index);
-
- // That makes us responsible for doing the rest of the stuff that processDeath would have done.
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- branch.inst_table.putAssumeCapacity(Air.refToIndex(operand).?, .dead);
+ self.getResolvedInstValue(Air.refToIndex(operand).?).reuse(self);
return true;
}
@@ -3703,9 +3668,6 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
const ptr = try self.resolveInst(ty_op.operand);
- const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
- if (self.liveness.isUnused(inst) and !is_volatile) break :result .dead;
-
const dst_mcv: MCValue = if (elem_size <= 8 and self.reuseOperand(inst, ty_op.operand, 0, ptr))
// The MCValue that holds the pointer can be re-used as the value.
ptr
@@ -4002,10 +3964,6 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
}
fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
- if (self.liveness.isUnused(inst)) {
- return MCValue.dead;
- }
-
const mcv = try self.resolveInst(operand);
const ptr_ty = self.air.typeOf(operand);
const container_ty = ptr_ty.childType();
@@ -4072,7 +4030,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result: MCValue = result: {
const operand = extra.struct_operand;
const index = extra.field_index;
@@ -4220,11 +4178,9 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement airFieldParentPtr for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ _ = ty_op;
+ return self.fail("TODO implement airFieldParentPtr for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
@@ -5443,9 +5399,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
+ const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
const dst_mcv: MCValue = switch (mcv) {
.register => |reg| blk: {
self.register_manager.getRegAssumeFree(reg.to64(), inst);
@@ -5536,23 +5490,17 @@ fn airBreakpoint(self: *Self) !void {
}
fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const dst_mcv = try self.allocRegOrMem(inst, true);
- try self.setRegOrMem(Type.usize, dst_mcv, .{
- .stack_offset = -@as(i32, @divExact(self.target.cpu.arch.ptrBitWidth(), 8)),
- });
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ .none, .none, .none });
+ const dst_mcv = try self.allocRegOrMem(inst, true);
+ try self.setRegOrMem(Type.usize, dst_mcv, .{
+ .stack_offset = -@as(i32, @divExact(self.target.cpu.arch.ptrBitWidth(), 8)),
+ });
+ return self.finishAir(inst, dst_mcv, .{ .none, .none, .none });
}
fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
- const dst_mcv = try self.allocRegOrMem(inst, true);
- try self.setRegOrMem(Type.usize, dst_mcv, .{ .register = .rbp });
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ .none, .none, .none });
+ const dst_mcv = try self.allocRegOrMem(inst, true);
+ try self.setRegOrMem(Type.usize, dst_mcv, .{ .register = .rbp });
+ return self.finishAir(inst, dst_mcv, .{ .none, .none, .none });
}
fn airFence(self: *Self, inst: Air.Inst.Index) !void {
@@ -5749,7 +5697,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
+ if (self.liveness.isUnused(inst)) break :result .unreach;
switch (info.return_value) {
.register => {
@@ -5771,12 +5719,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
std.mem.copy(Air.Inst.Ref, buf[1..], args);
return self.finishAir(inst, result, buf);
}
- var bt = try self.iterateBigTomb(inst, 1 + args.len);
- bt.feed(callee);
- for (args) |arg| {
- bt.feed(arg);
- }
- return bt.finishAir(result);
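+ // Editorial note (not in the original commit): calls with more operands than
+ // fit in finishAir's fixed buf take this path; each operand death is streamed
+ // through the big tomb iterator below instead.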
+ var bt = self.liveness.iterateBigTomb(inst);
+ self.feed(&bt, callee);
+ for (args) |arg| self.feed(&bt, arg);
+ return self.finishAirResult(inst, result);
}
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
@@ -5799,7 +5745,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
// which is available if the jump is 127 bytes or less forward.
const jmp_reloc = try self.asmJmpReloc(undefined);
try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
- return self.finishAir(inst, .dead, .{ un_op, .none, .none });
+ return self.finishAir(inst, .unreach, .{ un_op, .none, .none });
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
@@ -5829,65 +5775,63 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// which is available if the jump is 127 bytes or less forward.
const jmp_reloc = try self.asmJmpReloc(undefined);
try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
- return self.finishAir(inst, .dead, .{ un_op, .none, .none });
+ return self.finishAir(inst, .unreach, .{ un_op, .none, .none });
}
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ty = self.air.typeOf(bin_op.lhs);
- const ty_abi_size = ty.abiSize(self.target.*);
- const can_reuse = ty_abi_size <= 8;
+ const ty = self.air.typeOf(bin_op.lhs);
+ const ty_abi_size = ty.abiSize(self.target.*);
+ const can_reuse = ty_abi_size <= 8;
- try self.spillEflagsIfOccupied();
- self.eflags_inst = inst;
+ try self.spillEflagsIfOccupied();
+ self.eflags_inst = inst;
- const lhs_mcv = try self.resolveInst(bin_op.lhs);
- const lhs_lock = switch (lhs_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+ const lhs_mcv = try self.resolveInst(bin_op.lhs);
+ const lhs_lock = switch (lhs_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
- const rhs_mcv = try self.resolveInst(bin_op.rhs);
- const rhs_lock = switch (rhs_mcv) {
- .register => |reg| self.register_manager.lockReg(reg),
- else => null,
- };
- defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
-
- const dst_mem_ok = !ty.isRuntimeFloat();
- var flipped = false;
- const dst_mcv: MCValue = if (can_reuse and !lhs_mcv.isImmediate() and
- (dst_mem_ok or lhs_mcv.isRegister()) and self.liveness.operandDies(inst, 0))
- lhs_mcv
- else if (can_reuse and !rhs_mcv.isImmediate() and
- (dst_mem_ok or rhs_mcv.isRegister()) and self.liveness.operandDies(inst, 1))
- dst: {
- flipped = true;
- break :dst rhs_mcv;
- } else if (dst_mem_ok) dst: {
- const dst_mcv = try self.allocTempRegOrMem(ty, true);
- try self.setRegOrMem(ty, dst_mcv, lhs_mcv);
- break :dst dst_mcv;
- } else .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) };
- const dst_lock = switch (dst_mcv) {
- .register => |reg| self.register_manager.lockReg(reg),
- else => null,
- };
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ const rhs_mcv = try self.resolveInst(bin_op.rhs);
+ const rhs_lock = switch (rhs_mcv) {
+ .register => |reg| self.register_manager.lockReg(reg),
+ else => null,
+ };
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const dst_mem_ok = !ty.isRuntimeFloat();
+ var flipped = false;
+ const dst_mcv: MCValue = if (can_reuse and !lhs_mcv.isImmediate() and
+ (dst_mem_ok or lhs_mcv.isRegister()) and self.liveness.operandDies(inst, 0))
+ lhs_mcv
+ else if (can_reuse and !rhs_mcv.isImmediate() and
+ (dst_mem_ok or rhs_mcv.isRegister()) and self.liveness.operandDies(inst, 1))
+ dst: {
+ flipped = true;
+ break :dst rhs_mcv;
+ } else if (dst_mem_ok) dst: {
+ const dst_mcv = try self.allocTempRegOrMem(ty, true);
+ try self.setRegOrMem(ty, dst_mcv, lhs_mcv);
+ break :dst dst_mcv;
+ } else .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) };
+ const dst_lock = switch (dst_mcv) {
+ .register => |reg| self.register_manager.lockReg(reg),
+ else => null,
+ };
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const src_mcv = if (flipped) lhs_mcv else rhs_mcv;
- try self.genBinOpMir(switch (ty.tag()) {
- else => .cmp,
- .f32 => .ucomiss,
- .f64 => .ucomisd,
- }, ty, dst_mcv, src_mcv);
+ const src_mcv = if (flipped) lhs_mcv else rhs_mcv;
+ try self.genBinOpMir(switch (ty.tag()) {
+ else => .cmp,
+ .f32 => .ucomiss,
+ .f64 => .ucomisd,
+ }, ty, dst_mcv, src_mcv);
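+ // Editorial note (not in the original commit): when the destination reused the
+ // rhs operand, the cmp above computed rhs <op> lhs, which is why the condition
+ // below is built from op.reverse() in the flipped case.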
- const signedness = if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned;
- break :result .{
- .eflags = Condition.fromCompareOperator(signedness, if (flipped) op.reverse() else op),
- };
+ const signedness = if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned;
+ const result = MCValue{
+ .eflags = Condition.fromCompareOperator(signedness, if (flipped) op.reverse() else op),
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -5899,56 +5843,55 @@ fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const addr_reg = try self.register_manager.allocReg(null, gp);
- const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
- defer self.register_manager.unlockReg(addr_lock);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
- try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
- .base = .ds,
- .disp = @intCast(i32, got_addr),
- }));
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom_index = try macho_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
- try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
- } else {
- return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)});
- }
- try self.spillEflagsIfOccupied();
- self.eflags_inst = inst;
+ const addr_reg = try self.register_manager.allocReg(null, gp);
+ const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
+ defer self.register_manager.unlockReg(addr_lock);
- const op_ty = self.air.typeOf(un_op);
- const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*));
- const op_mcv = try self.resolveInst(un_op);
- const dst_reg = switch (op_mcv) {
- .register => |reg| reg,
- else => try self.copyToTmpRegister(op_ty, op_mcv),
- };
- try self.asmRegisterMemory(
- .cmp,
- registerAlias(dst_reg, op_abi_size),
- Memory.sib(Memory.PtrSize.fromSize(op_abi_size), .{ .base = addr_reg }),
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
);
- break :result .{ .eflags = .b };
+ const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
+ try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
+ .base = .ds,
+ .disp = @intCast(i32, got_addr),
+ }));
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
+ try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index = try macho_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
+ try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
+ } else {
+ return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)});
+ }
+
+ try self.spillEflagsIfOccupied();
+ self.eflags_inst = inst;
+
+ const op_ty = self.air.typeOf(un_op);
+ const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*));
+ const op_mcv = try self.resolveInst(un_op);
+ const dst_reg = switch (op_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(op_ty, op_mcv),
};
+ try self.asmRegisterMemory(
+ .cmp,
+ registerAlias(dst_reg, op_abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(op_abi_size), .{ .base = addr_reg }),
+ );
+ const result = MCValue{ .eflags = .b };
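+ // Editorial note (not in the original commit): addr_reg points at the lazily
+ // emitted error-set length constant, so the cmp above leaves "operand < len"
+ // in eflags; .b is the unsigned below condition, matching unsigned error values.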
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -5957,9 +5900,8 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const err_union_ty = self.air.typeOf(pl_op.operand);
- const err_union = try self.resolveInst(pl_op.operand);
- const result = try self.genTry(inst, err_union, body, err_union_ty, false);
- return self.finishAir(inst, result, .{ pl_op.operand, .none, .none });
+ const result = try self.genTry(inst, pl_op.operand, body, err_union_ty, false);
+ return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
@@ -5967,15 +5909,14 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const err_union_ty = self.air.typeOf(extra.data.ptr).childType();
- const err_union_ptr = try self.resolveInst(extra.data.ptr);
- const result = try self.genTry(inst, err_union_ptr, body, err_union_ty, true);
- return self.finishAir(inst, result, .{ extra.data.ptr, .none, .none });
+ const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true);
+ return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn genTry(
self: *Self,
inst: Air.Inst.Index,
- err_union: MCValue,
+ err_union: Air.Inst.Ref,
body: []const Air.Inst.Index,
err_union_ty: Type,
operand_is_ptr: bool,
@@ -5983,14 +5924,37 @@ fn genTry(
if (operand_is_ptr) {
return self.fail("TODO genTry for pointers", .{});
}
- const is_err_mcv = try self.isErr(null, err_union_ty, err_union);
+ const liveness_cond_br = self.liveness.getCondBr(inst);
+
+ const err_union_mcv = try self.resolveInst(err_union);
+ const is_err_mcv = try self.isErr(null, err_union_ty, err_union_mcv);
+
const reloc = try self.genCondBrMir(Type.anyerror, is_err_mcv);
+
+ if (self.liveness.operandDies(inst, 0)) {
+ if (Air.refToIndex(err_union)) |err_union_inst| self.processDeath(err_union_inst);
+ }
+
+ self.scope_generation += 1;
+ const state = try self.saveState();
+
+ for (liveness_cond_br.else_deaths) |operand| self.processDeath(operand);
try self.genBody(body);
+ try self.restoreState(state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
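+ // Editorial note (not in the original commit): the error branch body runs under
+ // its own saved state; restoring with .resurrect appears to bring values that
+ // died inside the branch back to life so the fall-through path sees consistent
+ // tracking.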
+
try self.performReloc(reloc);
+
+ for (liveness_cond_br.then_deaths) |operand| self.processDeath(operand);
+
const result = if (self.liveness.isUnused(inst))
- .dead
+ .unreach
else
- try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, err_union);
+ try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, err_union_mcv);
return result;
}
@@ -6013,12 +5977,12 @@ fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
// TODO emit debug info for function change
_ = function;
- return self.finishAir(inst, .dead, .{ .none, .none, .none });
+ return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void {
// TODO emit debug info lexical block
- return self.finishAir(inst, .dead, .{ .none, .none, .none });
+ return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
@@ -6034,7 +5998,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
try self.genVarDbgInfo(tag, ty, mcv, name);
- return self.finishAir(inst, .dead, .{ operand, .none, .none });
+ return self.finishAir(inst, .unreach, .{ operand, .none, .none });
}
fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
@@ -6071,7 +6035,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- const liveness_condbr = self.liveness.getCondBr(inst);
+ const liveness_cond_br = self.liveness.getCondBr(inst);
const reloc = try self.genCondBrMir(cond_ty, cond);
@@ -6082,60 +6046,37 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
if (Air.refToIndex(pl_op.operand)) |op_inst| self.processDeath(op_inst);
}
- // Capture the state of register and stack allocation state so that we can revert to it.
- const saved_state = self.captureState();
-
+ const outer_state = try self.saveState();
{
- try self.branch_stack.append(.{});
- errdefer _ = self.branch_stack.pop();
+ self.scope_generation += 1;
+ const inner_state = try self.saveState();
- try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
- for (liveness_condbr.then_deaths) |operand| {
- self.processDeath(operand);
- }
+ for (liveness_cond_br.then_deaths) |operand| self.processDeath(operand);
try self.genBody(then_body);
- }
-
- // Revert to the previous register and stack allocation state.
-
- var then_branch = self.branch_stack.pop();
- defer then_branch.deinit(self.gpa);
-
- self.revertState(saved_state);
-
- try self.performReloc(reloc);
+ try self.restoreState(inner_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
- {
- try self.branch_stack.append(.{});
- errdefer _ = self.branch_stack.pop();
+ try self.performReloc(reloc);
- try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len);
- for (liveness_condbr.else_deaths) |operand| {
- self.processDeath(operand);
- }
+ for (liveness_cond_br.else_deaths) |operand| self.processDeath(operand);
try self.genBody(else_body);
+ try self.restoreState(inner_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
}
-
- var else_branch = self.branch_stack.pop();
- defer else_branch.deinit(self.gpa);
-
- // At this point, each branch will possibly have conflicting values for where
- // each instruction is stored. They agree, however, on which instructions are alive/dead.
- // We use the first ("then") branch as canonical, and here emit
- // instructions into the second ("else") branch to make it conform.
- // We continue respect the data structure semantic guarantees of the else_branch so
- // that we can use all the code emitting abstractions. This is why at the bottom we
- // assert that parent_branch.free_registers equals the saved_then_branch.free_registers
- // rather than assigning it.
- log.debug("airCondBr: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
- }
- log.debug("Then branch: {}", .{then_branch.fmtDebug()});
- log.debug("Else branch: {}", .{else_branch.fmtDebug()});
-
- try self.canonicaliseBranches(true, &then_branch, &else_branch, true, true);
+ try self.restoreState(outer_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = false,
+ .resurrect = false,
+ .close_scope = true,
+ });
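+ // Editorial note (not in the original commit): this saveState/restoreState pair
+ // replaces the old branch_stack merging; both arms are generated from the same
+ // inner_state, and the outer restore only closes the scope without emitting
+ // fixup instructions, since tracking was already reconciled per arm.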
// We already took care of pl_op.operand earlier, so we're going
// to pass .none here
@@ -6309,67 +6250,53 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCVa
fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNull(inst, ty, operand);
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = try self.isNull(inst, ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNullPtr(inst, ty, operand);
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = try self.isNullPtr(inst, ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result switch (try self.isNull(inst, ty, operand)) {
- .eflags => |cc| .{ .eflags = cc.negate() },
- else => unreachable,
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = switch (try self.isNull(inst, ty, operand)) {
+ .eflags => |cc| .{ .eflags = cc.negate() },
+ else => unreachable,
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result switch (try self.isNullPtr(inst, ty, operand)) {
- .eflags => |cc| .{ .eflags = cc.negate() },
- else => unreachable,
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = switch (try self.isNullPtr(inst, ty, operand)) {
+ .eflags => |cc| .{ .eflags = cc.negate() },
+ else => unreachable,
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isErr(inst, ty, operand);
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = try self.isErr(inst, ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ un_op, .none, .none });
- }
-
const operand_ptr = try self.resolveInst(un_op);
const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -6395,21 +6322,15 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
- break :result try self.isNonErr(inst, ty, operand);
- };
+ const operand = try self.resolveInst(un_op);
+ const ty = self.air.typeOf(un_op);
+ const result = try self.isNonErr(inst, ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- if (self.liveness.isUnused(inst)) {
- return self.finishAir(inst, .dead, .{ un_op, .none, .none });
- }
-
const operand_ptr = try self.resolveInst(un_op);
const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -6439,103 +6360,61 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
const jmp_target = @intCast(u32, self.mir_instructions.len);
- const liveness_loop = self.liveness.getLoop(inst);
- {
- try self.branch_stack.append(.{});
- errdefer _ = self.branch_stack.pop();
-
- try self.genBody(body);
- }
-
- var branch = self.branch_stack.pop();
- defer branch.deinit(self.gpa);
-
- log.debug("airLoop: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
- }
- log.debug("Loop branch: {}", .{branch.fmtDebug()});
-
- var dummy_branch = Branch{};
- defer dummy_branch.deinit(self.gpa);
- try self.canonicaliseBranches(true, &dummy_branch, &branch, true, false);
+ self.scope_generation += 1;
+ const state = try self.saveState();
+ try self.genBody(body);
+ try self.restoreState(state, &.{}, .{
+ .emit_instructions = true,
+ .update_tracking = false,
+ .resurrect = false,
+ .close_scope = true,
+ });
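+ // Editorial note (not in the original commit): unlike branches, a loop restores
+ // with .emit_instructions = true: the back-edge below jumps to jmp_target, so
+ // moves must actually be emitted to put values back where the loop head
+ // expects them.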
_ = try self.asmJmpReloc(jmp_target);
- try self.ensureProcessDeathCapacity(liveness_loop.deaths.len);
- for (liveness_loop.deaths) |operand| {
- self.processDeath(operand);
- }
-
return self.finishAirBookkeeping();
}
fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
// A block is a setup to be able to jump to the end.
- const branch_depth = @intCast(u32, self.branch_stack.items.len);
- try self.blocks.putNoClobber(self.gpa, inst, .{ .branch_depth = branch_depth });
- defer {
- var block_data = self.blocks.fetchRemove(inst).?.value;
- block_data.deinit(self.gpa);
- }
-
- const ty = self.air.typeOfIndex(inst);
- const unused = !ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(inst);
-
- {
- // Here we use `.none` to represent a null value so that the first break
- // instruction will choose a MCValue for the block result and overwrite
- // this field. Following break instructions will use that MCValue to put
- // their block results.
- const result: MCValue = if (unused) .dead else .none;
- const branch = &self.branch_stack.items[branch_depth - 1];
- try branch.inst_table.putNoClobber(self.gpa, inst, result);
- }
+ self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(.unreach));
- {
- try self.branch_stack.append(.{});
- errdefer _ = self.branch_stack.pop();
-
- const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const extra = self.air.extraData(Air.Block, ty_pl.payload);
- const body = self.air.extra[extra.end..][0..extra.data.body_len];
- try self.genBody(body);
- }
+ self.scope_generation += 1;
+ try self.blocks.putNoClobber(self.gpa, inst, .{ .state = self.initRetroactiveState() });
+ const liveness = self.liveness.getBlock(inst);
- const block_data = self.blocks.getPtr(inst).?;
- const target_branch = self.branch_stack.pop();
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Block, ty_pl.payload);
+ const body = self.air.extra[extra.end..][0..extra.data.body_len];
+ try self.genBody(body);
- log.debug("airBlock: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
+ var block_data = self.blocks.fetchRemove(inst).?;
+ defer block_data.value.deinit(self.gpa);
+ if (block_data.value.relocs.items.len > 0) {
+ try self.restoreState(block_data.value.state, liveness.deaths, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
+ for (block_data.value.relocs.items) |reloc| try self.performReloc(reloc);
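+ // Editorial note (not in the original commit): every `br` that targeted this
+ // block appended a reloc and merged its result into the block's retroactive
+ // state; patching the relocs here makes all breaks land at this join point.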
}
- log.debug("Block branch: {}", .{block_data.branch.fmtDebug()});
- log.debug("Target branch: {}", .{target_branch.fmtDebug()});
-
- try self.canonicaliseBranches(true, &block_data.branch, &target_branch, false, false);
- for (block_data.relocs.items) |reloc| try self.performReloc(reloc);
-
- const result = if (unused) .dead else self.getResolvedInstValue(inst).?.*;
- self.getValue(result, inst);
+ const tracking = self.inst_tracking.getPtr(inst).?;
+ if (self.liveness.isUnused(inst)) tracking.die(self);
+ self.getValue(tracking.short, inst);
self.finishAirBookkeeping();
}
-fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
+fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const condition = try self.resolveInst(pl_op.operand);
const condition_ty = self.air.typeOf(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
var extra_index: usize = switch_br.end;
var case_i: u32 = 0;
- const liveness = try self.liveness.getSwitchBr(
- self.gpa,
- inst,
- switch_br.data.cases_len + 1,
- );
+ const liveness = try self.liveness.getSwitchBr(self.gpa, inst, switch_br.data.cases_len + 1);
defer self.gpa.free(liveness.deaths);
// If the condition dies here in this switch instruction, process
@@ -6545,186 +6424,69 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
if (Air.refToIndex(pl_op.operand)) |op_inst| self.processDeath(op_inst);
}
- log.debug("airSwitch: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
- }
-
- var prev_branch: ?Branch = null;
- defer if (prev_branch) |*branch| branch.deinit(self.gpa);
-
- // Capture the state of register and stack allocation state so that we can revert to it.
- const saved_state = self.captureState();
-
- const cases_len = switch_br.data.cases_len + @boolToInt(switch_br.data.else_body_len > 0);
- while (case_i < switch_br.data.cases_len) : (case_i += 1) {
- const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
- const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
- extra_index = case.end + items.len + case_body.len;
-
- // Revert to the previous register and stack allocation state.
- if (prev_branch) |_| self.revertState(saved_state);
-
- var relocs = try self.gpa.alloc(u32, items.len);
- defer self.gpa.free(relocs);
-
- for (items, relocs) |item, *reloc| {
- try self.spillEflagsIfOccupied();
- const item_mcv = try self.resolveInst(item);
- try self.genBinOpMir(.cmp, condition_ty, condition, item_mcv);
- reloc.* = try self.asmJccReloc(undefined, .ne);
- }
+ const outer_state = try self.saveState();
+ {
+ self.scope_generation += 1;
+ const inner_state = try self.saveState();
+
+ while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+ const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items = @ptrCast(
+ []const Air.Inst.Ref,
+ self.air.extra[case.end..][0..case.data.items_len],
+ );
+ const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
+ extra_index = case.end + items.len + case_body.len;
- {
- if (cases_len > 1) try self.branch_stack.append(.{});
- errdefer _ = if (cases_len > 1) self.branch_stack.pop();
+ var relocs = try self.gpa.alloc(u32, items.len);
+ defer self.gpa.free(relocs);
- try self.ensureProcessDeathCapacity(liveness.deaths[case_i].len);
- for (liveness.deaths[case_i]) |operand| {
- self.processDeath(operand);
+ for (items, relocs) |item, *reloc| {
+ try self.spillEflagsIfOccupied();
+ const item_mcv = try self.resolveInst(item);
+ try self.genBinOpMir(.cmp, condition_ty, condition, item_mcv);
+ reloc.* = try self.asmJccReloc(undefined, .ne);
}
- try self.genBody(case_body);
- }
+ for (liveness.deaths[case_i]) |operand| self.processDeath(operand);
- // Consolidate returned MCValues between prongs like we do in airCondBr.
- if (cases_len > 1) {
- var case_branch = self.branch_stack.pop();
- errdefer case_branch.deinit(self.gpa);
+ try self.genBody(case_body);
+ try self.restoreState(inner_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
- log.debug("Case-{d} branch: {}", .{ case_i, case_branch.fmtDebug() });
- const final = case_i == cases_len - 1;
- if (prev_branch) |*canon_branch| {
- try self.canonicaliseBranches(final, canon_branch, &case_branch, true, true);
- canon_branch.deinit(self.gpa);
- }
- prev_branch = case_branch;
+ for (relocs) |reloc| try self.performReloc(reloc);
}
- for (relocs) |reloc| try self.performReloc(reloc);
- }
-
- if (switch_br.data.else_body_len > 0) {
- const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
-
- // Revert to the previous register and stack allocation state.
- if (prev_branch) |_| self.revertState(saved_state);
-
- {
- if (cases_len > 1) try self.branch_stack.append(.{});
- errdefer _ = if (cases_len > 1) self.branch_stack.pop();
+ if (switch_br.data.else_body_len > 0) {
+ const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
const else_deaths = liveness.deaths.len - 1;
- try self.ensureProcessDeathCapacity(liveness.deaths[else_deaths].len);
- for (liveness.deaths[else_deaths]) |operand| {
- self.processDeath(operand);
- }
+ for (liveness.deaths[else_deaths]) |operand| self.processDeath(operand);
try self.genBody(else_body);
- }
-
- // Consolidate returned MCValues between a prong and the else branch like we do in airCondBr.
- if (cases_len > 1) {
- var else_branch = self.branch_stack.pop();
- errdefer else_branch.deinit(self.gpa);
-
- log.debug("Else branch: {}", .{else_branch.fmtDebug()});
- if (prev_branch) |*canon_branch| {
- try self.canonicaliseBranches(true, canon_branch, &else_branch, true, true);
- canon_branch.deinit(self.gpa);
- }
- prev_branch = else_branch;
+ try self.restoreState(inner_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = true,
+ .resurrect = true,
+ .close_scope = true,
+ });
}
}
+ try self.restoreState(outer_state, &.{}, .{
+ .emit_instructions = false,
+ .update_tracking = false,
+ .resurrect = false,
+ .close_scope = true,
+ });
// We already took care of pl_op.operand earlier, so we're going to pass .none here
return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
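
Every prong restores `inner_state` with `.update_tracking` and `.resurrect` set, so the next prong starts from identical machine state, while the final `outer_state` restore only closes the scope. The flag combinations are taken from the calls above; the struct name and comments below are an interpretive sketch, not the compiler's declaration:

// Assumed name; only the field names and the combinations used above are
// taken from this diff.
const RestoreStateOptions = struct {
    // Emit moves so the machine state physically matches the saved state
    // (used by airBr, which jumps to an already-established block state).
    emit_instructions: bool,
    // Rewrite the tracking tables to match the saved state (prong joins).
    update_tracking: bool,
    // Undo deaths from scopes newer than the saved scope_generation.
    resurrect: bool,
    // Pop the scope opened by the matching saveState.
    close_scope: bool,
};
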
-fn canonicaliseBranches(
- self: *Self,
- update_parent: bool,
- canon_branch: *Branch,
- target_branch: *const Branch,
- comptime set_values: bool,
- comptime assert_same_deaths: bool,
-) !void {
- var hazard_map = std.AutoHashMap(MCValue, void).init(self.gpa);
- defer hazard_map.deinit();
-
- const parent_branch =
- if (update_parent) &self.branch_stack.items[self.branch_stack.items.len - 1] else undefined;
-
- if (update_parent) try self.ensureProcessDeathCapacity(target_branch.inst_table.count());
- var target_it = target_branch.inst_table.iterator();
- while (target_it.next()) |target_entry| {
- const target_key = target_entry.key_ptr.*;
- const target_value = target_entry.value_ptr.*;
- const canon_mcv = if (canon_branch.inst_table.fetchSwapRemove(target_key)) |canon_entry| blk: {
- // The instruction's MCValue is overridden in both branches.
- if (target_value == .dead) {
- if (update_parent) {
- parent_branch.inst_table.putAssumeCapacity(target_key, .dead);
- }
- if (assert_same_deaths) assert(canon_entry.value == .dead);
- continue;
- }
- if (update_parent) {
- parent_branch.inst_table.putAssumeCapacity(target_key, canon_entry.value);
- }
- break :blk canon_entry.value;
- } else blk: {
- if (target_value == .dead) {
- if (update_parent) {
- parent_branch.inst_table.putAssumeCapacity(target_key, .dead);
- }
- continue;
- }
- // The instruction is only overridden in the else branch.
- // If integer overflow occurs, the question is: why wasn't the instruction marked dead?
- break :blk self.getResolvedInstValue(target_key).?.*;
- };
- log.debug("consolidating target_entry %{d} {}=>{}", .{ target_key, target_value, canon_mcv });
- // TODO handle the case where the destination stack offset / register has something
- // going on there.
- assert(!hazard_map.contains(target_value));
- try hazard_map.putNoClobber(canon_mcv, {});
- if (set_values) {
- try self.setRegOrMem(self.air.typeOfIndex(target_key), canon_mcv, target_value);
- } else self.getValue(canon_mcv, target_key);
- self.freeValue(target_value);
- // TODO track the new register / stack allocation
- }
-
- if (update_parent) try self.ensureProcessDeathCapacity(canon_branch.inst_table.count());
- var canon_it = canon_branch.inst_table.iterator();
- while (canon_it.next()) |canon_entry| {
- const canon_key = canon_entry.key_ptr.*;
- const canon_value = canon_entry.value_ptr.*;
- // We already deleted the items from this table that matched the target_branch.
- // So these are all instructions that are only overridden in the canon branch.
- const parent_mcv =
- if (canon_value != .dead) self.getResolvedInstValue(canon_key).?.* else undefined;
- if (canon_value != .dead) {
- log.debug("consolidating canon_entry %{d} {}=>{}", .{ canon_key, parent_mcv, canon_value });
- // TODO handle the case where the destination stack offset / register has something
- // going on there.
- assert(!hazard_map.contains(parent_mcv));
- try hazard_map.putNoClobber(canon_value, {});
- if (set_values) {
- try self.setRegOrMem(self.air.typeOfIndex(canon_key), canon_value, parent_mcv);
- } else self.getValue(canon_value, canon_key);
- self.freeValue(parent_mcv);
- // TODO track the new register / stack allocation
- }
- if (update_parent) {
- parent_branch.inst_table.putAssumeCapacity(canon_key, canon_value);
- }
- }
-}
-
fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
const next_inst = @intCast(u32, self.mir_instructions.len);
switch (self.mir_instructions.items(.tag)[reloc]) {
@@ -6740,76 +6502,53 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
const br = self.air.instructions.items(.data)[inst].br;
- const block = br.block_inst;
-
- // The first break instruction encounters `.none` here and chooses a
- // machine code value for the block result, populating this field.
- // Following break instructions encounter that value and use it for
- // the location to store their block results.
- if (self.getResolvedInstValue(block)) |dst_mcv| {
- const src_mcv = try self.resolveInst(br.operand);
- switch (dst_mcv.*) {
- .none => {
- const result = result: {
- if (self.reuseOperand(inst, br.operand, 0, src_mcv)) break :result src_mcv;
-
- const new_mcv = try self.allocRegOrMem(block, true);
- try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, src_mcv);
- break :result new_mcv;
- };
- dst_mcv.* = result;
- self.freeValue(result);
- },
- else => try self.setRegOrMem(self.air.typeOfIndex(block), dst_mcv.*, src_mcv),
- }
- }
+ const src_mcv = try self.resolveInst(br.operand);
+
+ const block_ty = self.air.typeOfIndex(br.block_inst);
+ const block_unused =
+ !block_ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(br.block_inst);
+ const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
+ const block_data = self.blocks.getPtr(br.block_inst).?;
+
+ if (block_data.relocs.items.len == 0) {
+ block_tracking.* = InstTracking.init(result: {
+ if (block_unused) break :result .none;
+ if (self.reuseOperand(inst, br.operand, 0, src_mcv)) {
+ // Fix instruction tracking: the reused register now holds the
+ // block result, so re-attribute it to the block instruction.
+ switch (src_mcv) {
+ .register => |reg| if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
+ self.register_manager.registers[index] = br.block_inst;
+ },
+ else => {},
+ }
+ break :result src_mcv;
+ }
+
+ const new_mcv = try self.allocRegOrMem(br.block_inst, true);
+ try self.setRegOrMem(block_ty, new_mcv, src_mcv);
+ break :result new_mcv;
+ });
+ } else if (!block_unused) try self.setRegOrMem(block_ty, block_tracking.short, src_mcv);
- // Process operand death early so that it is properly accounted for in the Branch below.
+ // Process operand death so that it is properly accounted for in the State below.
if (self.liveness.operandDies(inst, 0)) {
if (Air.refToIndex(br.operand)) |op_inst| self.processDeath(op_inst);
}
- const block_data = self.blocks.getPtr(block).?;
- {
- var branch = Branch{};
- errdefer branch.deinit(self.gpa);
-
- var branch_i = self.branch_stack.items.len - 1;
- while (branch_i >= block_data.branch_depth) : (branch_i -= 1) {
- const table = &self.branch_stack.items[branch_i].inst_table;
- try branch.inst_table.ensureUnusedCapacity(self.gpa, table.count());
- var it = table.iterator();
- while (it.next()) |entry| {
- // This loop could be avoided by tracking inst depth, which
- // will be needed later anyway for reusing loop deaths.
- var parent_branch_i = block_data.branch_depth - 1;
- while (parent_branch_i > 0) : (parent_branch_i -= 1) {
- const parent_table = &self.branch_stack.items[parent_branch_i].inst_table;
- if (parent_table.contains(entry.key_ptr.*)) break;
- } else continue;
- const gop = branch.inst_table.getOrPutAssumeCapacity(entry.key_ptr.*);
- if (!gop.found_existing) gop.value_ptr.* = entry.value_ptr.*;
- }
- }
-
- log.debug("airBr: %{d}", .{inst});
- log.debug("Upper branches:", .{});
- for (self.branch_stack.items) |bs| {
- log.debug("{}", .{bs.fmtDebug()});
- }
- log.debug("Prev branch: {}", .{block_data.branch.fmtDebug()});
- log.debug("Cur branch: {}", .{branch.fmtDebug()});
-
- try self.canonicaliseBranches(false, &block_data.branch, &branch, true, false);
- block_data.branch.deinit(self.gpa);
- block_data.branch = branch;
- }
+ if (block_data.relocs.items.len == 0) {
+ try self.saveRetroactiveState(&block_data.state);
+ block_tracking.die(self);
+ } else try self.restoreState(block_data.state, &.{}, .{
+ .emit_instructions = true,
+ .update_tracking = false,
+ .resurrect = false,
+ .close_scope = false,
+ });
// Emit a jump with a relocation. It will be patched up after the block ends.
- try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
// Leave the jump offset undefined
const jmp_reloc = try self.asmJmpReloc(undefined);
- block_data.relocs.appendAssumeCapacity(jmp_reloc);
+ try block_data.relocs.append(self.gpa, jmp_reloc);
self.finishAirBookkeeping();
}
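
`asmJmpReloc(undefined)` emits a jump with a placeholder target and returns its MIR index; the block records that index, and `performReloc` later patches the jump to whatever instruction follows at patch time. A self-contained back-patching model with toy instruction types:

const std = @import("std");

const Jmp = struct { target: u32 };

fn emitJmpReloc(insts: *std.ArrayList(Jmp)) !u32 {
    try insts.append(.{ .target = undefined }); // patched by performReloc
    return @intCast(u32, insts.items.len - 1);
}

fn performReloc(insts: []Jmp, reloc: u32) void {
    // The jump lands on the next instruction index at patch time.
    insts[reloc].target = @intCast(u32, insts.len);
}

test "a recorded jump is patched to the join point" {
    var insts = std.ArrayList(Jmp).init(std.testing.allocator);
    defer insts.deinit();
    const reloc = try emitJmpReloc(&insts);
    try insts.append(.{ .target = 0 }); // body emitted between br and join
    performReloc(insts.items, reloc);
    try std.testing.expectEqual(@as(u32, 2), insts.items[reloc].target);
}
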
@@ -6817,7 +6556,6 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
- const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
const clobbers_len = @truncate(u31, extra.data.flags);
var extra_i: usize = extra.end;
const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
@@ -6826,216 +6564,214 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
extra_i += inputs.len;
var result: MCValue = .none;
- if (!is_volatile and self.liveness.isUnused(inst)) result = .dead else {
- var args = std.StringArrayHashMap(MCValue).init(self.gpa);
- try args.ensureTotalCapacity(outputs.len + inputs.len + clobbers_len);
- defer {
- for (args.values()) |arg| switch (arg) {
- .register => |reg| self.register_manager.unlockReg(.{ .register = reg }),
- else => {},
- };
- args.deinit();
- }
+ var args = std.StringArrayHashMap(MCValue).init(self.gpa);
+ try args.ensureTotalCapacity(outputs.len + inputs.len + clobbers_len);
+ defer {
+ for (args.values()) |arg| switch (arg) {
+ .register => |reg| self.register_manager.unlockReg(.{ .register = reg }),
+ else => {},
+ };
+ args.deinit();
+ }
+
+ if (outputs.len > 1) {
+ return self.fail("TODO implement codegen for asm with more than 1 output", .{});
+ }
- if (outputs.len > 1) {
- return self.fail("TODO implement codegen for asm with more than 1 output", .{});
+ for (outputs) |output| {
+ if (output != .none) {
+ return self.fail("TODO implement codegen for non-expr asm", .{});
+ }
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+
+ const mcv: MCValue = if (mem.eql(u8, constraint, "=r"))
+ .{ .register = self.register_manager.tryAllocReg(inst, gp) orelse
+ return self.fail("ran out of registers lowering inline asm", .{}) }
+ else if (mem.startsWith(u8, constraint, "={") and mem.endsWith(u8, constraint, "}"))
+ .{ .register = parseRegName(constraint["={".len .. constraint.len - "}".len]) orelse
+ return self.fail("unrecognized register constraint: '{s}'", .{constraint}) }
+ else
+ return self.fail("unrecognized constraint: '{s}'", .{constraint});
+ args.putAssumeCapacity(name, mcv);
+ switch (mcv) {
+ .register => |reg| _ = if (RegisterManager.indexOfRegIntoTracked(reg)) |_|
+ self.register_manager.lockRegAssumeUnused(reg),
+ else => {},
}
+ if (output == .none) result = mcv;
+ }
- for (outputs) |output| {
- if (output != .none) {
- return self.fail("TODO implement codegen for non-expr asm", .{});
- }
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
- const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
- // This equation accounts for the fact that even if we have exactly 4 bytes
- // for the string, we still use the next u32 for the null terminator.
- extra_i += (constraint.len + name.len + (2 + 3)) / 4;
-
- const mcv: MCValue = if (mem.eql(u8, constraint, "=r"))
- .{ .register = self.register_manager.tryAllocReg(inst, gp) orelse
- return self.fail("ran out of registers lowering inline asm", .{}) }
- else if (mem.startsWith(u8, constraint, "={") and mem.endsWith(u8, constraint, "}"))
- .{ .register = parseRegName(constraint["={".len .. constraint.len - "}".len]) orelse
- return self.fail("unrecognized register constraint: '{s}'", .{constraint}) }
- else
- return self.fail("unrecognized constraint: '{s}'", .{constraint});
- args.putAssumeCapacity(name, mcv);
- switch (mcv) {
- .register => |reg| _ = if (RegisterManager.indexOfRegIntoTracked(reg)) |_|
- self.register_manager.lockRegAssumeUnused(reg),
- else => {},
- }
- if (output == .none) result = mcv;
+ for (inputs) |input| {
+ const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const constraint = std.mem.sliceTo(input_bytes, 0);
+ const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+
+ if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
+ return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
}
+ const reg_name = constraint[1 .. constraint.len - 1];
+ const reg = parseRegName(reg_name) orelse
+ return self.fail("unrecognized register: '{s}'", .{reg_name});
+
+ const arg_mcv = try self.resolveInst(input);
+ try self.register_manager.getReg(reg, null);
+ try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+ }
- for (inputs) |input| {
- const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(input_bytes, 0);
- const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
+ {
+ var clobber_i: u32 = 0;
+ while (clobber_i < clobbers_len) : (clobber_i += 1) {
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
- extra_i += (constraint.len + name.len + (2 + 3)) / 4;
-
- if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
- return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
- }
- const reg_name = constraint[1 .. constraint.len - 1];
- const reg = parseRegName(reg_name) orelse
- return self.fail("unrecognized register: '{s}'", .{reg_name});
+ extra_i += clobber.len / 4 + 1;
- const arg_mcv = try self.resolveInst(input);
- try self.register_manager.getReg(reg, null);
- try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
- }
-
- {
- var clobber_i: u32 = 0;
- while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
- // This equation accounts for the fact that even if we have exactly 4 bytes
- // for the string, we still use the next u32 for the null terminator.
- extra_i += clobber.len / 4 + 1;
-
- // TODO honor these
- }
+ // TODO honor these
}
+ }
- const asm_source = mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
- var line_it = mem.tokenize(u8, asm_source, "\n\r;");
- while (line_it.next()) |line| {
- var mnem_it = mem.tokenize(u8, line, " \t");
- const mnem_str = mnem_it.next() orelse continue;
- if (mem.startsWith(u8, mnem_str, "#")) continue;
-
- const mnem_size: ?Memory.PtrSize = if (mem.endsWith(u8, mnem_str, "b"))
- .byte
- else if (mem.endsWith(u8, mnem_str, "w"))
- .word
- else if (mem.endsWith(u8, mnem_str, "l"))
- .dword
- else if (mem.endsWith(u8, mnem_str, "q"))
- .qword
- else
- null;
- const mnem = std.meta.stringToEnum(Mir.Inst.Tag, mnem_str) orelse
- (if (mnem_size) |_|
- std.meta.stringToEnum(Mir.Inst.Tag, mnem_str[0 .. mnem_str.len - 1])
- else
- null) orelse return self.fail("Invalid mnemonic: '{s}'", .{mnem_str});
-
- var op_it = mem.tokenize(u8, mnem_it.rest(), ",");
- var ops = [1]encoder.Instruction.Operand{.none} ** 4;
- for (&ops) |*op| {
- const op_str = mem.trim(u8, op_it.next() orelse break, " \t");
- if (mem.startsWith(u8, op_str, "#")) break;
- if (mem.startsWith(u8, op_str, "%%")) {
- const colon = mem.indexOfScalarPos(u8, op_str, "%%".len + 2, ':');
- const reg = parseRegName(op_str["%%".len .. colon orelse op_str.len]) orelse
- return self.fail("Invalid register: '{s}'", .{op_str});
- if (colon) |colon_pos| {
- const disp = std.fmt.parseInt(i32, op_str[colon_pos + 1 ..], 0) catch
- return self.fail("Invalid displacement: '{s}'", .{op_str});
- op.* = .{ .mem = Memory.sib(
- mnem_size orelse return self.fail("Unknown size: '{s}'", .{op_str}),
- .{ .base = reg, .disp = disp },
- ) };
- } else {
- if (mnem_size) |size| if (reg.bitSize() != size.bitSize())
- return self.fail("Invalid register size: '{s}'", .{op_str});
- op.* = .{ .reg = reg };
+ const asm_source = mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+ var line_it = mem.tokenize(u8, asm_source, "\n\r;");
+ while (line_it.next()) |line| {
+ var mnem_it = mem.tokenize(u8, line, " \t");
+ const mnem_str = mnem_it.next() orelse continue;
+ if (mem.startsWith(u8, mnem_str, "#")) continue;
+
+ const mnem_size: ?Memory.PtrSize = if (mem.endsWith(u8, mnem_str, "b"))
+ .byte
+ else if (mem.endsWith(u8, mnem_str, "w"))
+ .word
+ else if (mem.endsWith(u8, mnem_str, "l"))
+ .dword
+ else if (mem.endsWith(u8, mnem_str, "q"))
+ .qword
+ else
+ null;
+ const mnem = std.meta.stringToEnum(Mir.Inst.Tag, mnem_str) orelse
+ (if (mnem_size) |_|
+ std.meta.stringToEnum(Mir.Inst.Tag, mnem_str[0 .. mnem_str.len - 1])
+ else
+ null) orelse return self.fail("Invalid mnemonic: '{s}'", .{mnem_str});
+
+ var op_it = mem.tokenize(u8, mnem_it.rest(), ",");
+ var ops = [1]encoder.Instruction.Operand{.none} ** 4;
+ for (&ops) |*op| {
+ const op_str = mem.trim(u8, op_it.next() orelse break, " \t");
+ if (mem.startsWith(u8, op_str, "#")) break;
+ if (mem.startsWith(u8, op_str, "%%")) {
+ const colon = mem.indexOfScalarPos(u8, op_str, "%%".len + 2, ':');
+ const reg = parseRegName(op_str["%%".len .. colon orelse op_str.len]) orelse
+ return self.fail("Invalid register: '{s}'", .{op_str});
+ if (colon) |colon_pos| {
+ const disp = std.fmt.parseInt(i32, op_str[colon_pos + 1 ..], 0) catch
+ return self.fail("Invalid displacement: '{s}'", .{op_str});
+ op.* = .{ .mem = Memory.sib(
+ mnem_size orelse return self.fail("Unknown size: '{s}'", .{op_str}),
+ .{ .base = reg, .disp = disp },
+ ) };
+ } else {
+ if (mnem_size) |size| if (reg.bitSize() != size.bitSize())
+ return self.fail("Invalid register size: '{s}'", .{op_str});
+ op.* = .{ .reg = reg };
+ }
+ } else if (mem.startsWith(u8, op_str, "%[") and mem.endsWith(u8, op_str, "]")) {
+ switch (args.get(op_str["%[".len .. op_str.len - "]".len]) orelse
+ return self.fail("No matching constraint: '{s}'", .{op_str})) {
+ .register => |reg| op.* = .{ .reg = reg },
+ else => return self.fail("Invalid constraint: '{s}'", .{op_str}),
+ }
+ } else if (mem.startsWith(u8, op_str, "$")) {
+ if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| {
+ if (mnem_size) |size| {
+ const max = @as(u64, math.maxInt(u64)) >>
+ @intCast(u6, 64 - (size.bitSize() - 1));
+ if ((if (s < 0) ~s else s) > max)
+ return self.fail("Invalid immediate size: '{s}'", .{op_str});
}
- } else if (mem.startsWith(u8, op_str, "%[") and mem.endsWith(u8, op_str, "]")) {
- switch (args.get(op_str["%[".len .. op_str.len - "]".len]) orelse
- return self.fail("No matching constraint: '{s}'", .{op_str})) {
- .register => |reg| op.* = .{ .reg = reg },
- else => return self.fail("Invalid constraint: '{s}'", .{op_str}),
+ op.* = .{ .imm = Immediate.s(s) };
+ } else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| {
+ if (mnem_size) |size| {
+ const max = @as(u64, math.maxInt(u64)) >>
+ @intCast(u6, 64 - size.bitSize());
+ if (u > max)
+ return self.fail("Invalid immediate size: '{s}'", .{op_str});
}
- } else if (mem.startsWith(u8, op_str, "$")) {
- if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| {
- if (mnem_size) |size| {
- const max = @as(u64, math.maxInt(u64)) >>
- @intCast(u6, 64 - (size.bitSize() - 1));
- if ((if (s < 0) ~s else s) > max)
- return self.fail("Invalid immediate size: '{s}'", .{op_str});
- }
- op.* = .{ .imm = Immediate.s(s) };
- } else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| {
- if (mnem_size) |size| {
- const max = @as(u64, math.maxInt(u64)) >>
- @intCast(u6, 64 - size.bitSize());
- if (u > max)
- return self.fail("Invalid immediate size: '{s}'", .{op_str});
- }
- op.* = .{ .imm = Immediate.u(u) };
- } else |_| return self.fail("Invalid immediate: '{s}'", .{op_str});
- } else return self.fail("Invalid operand: '{s}'", .{op_str});
- } else if (op_it.next()) |op_str| return self.fail("Extra operand: '{s}'", .{op_str});
-
- (switch (ops[0]) {
- .none => self.asmOpOnly(mnem),
- .reg => |reg0| switch (ops[1]) {
- .none => self.asmRegister(mnem, reg0),
- .reg => |reg1| switch (ops[2]) {
- .none => self.asmRegisterRegister(mnem, reg1, reg0),
- .reg => |reg2| switch (ops[3]) {
- .none => self.asmRegisterRegisterRegister(mnem, reg2, reg1, reg0),
- else => error.InvalidInstruction,
- },
- .mem => |mem2| switch (ops[3]) {
- .none => self.asmMemoryRegisterRegister(mnem, mem2, reg1, reg0),
- else => error.InvalidInstruction,
- },
+ op.* = .{ .imm = Immediate.u(u) };
+ } else |_| return self.fail("Invalid immediate: '{s}'", .{op_str});
+ } else return self.fail("Invalid operand: '{s}'", .{op_str});
+ } else if (op_it.next()) |op_str| return self.fail("Extra operand: '{s}'", .{op_str});
+
+ (switch (ops[0]) {
+ .none => self.asmOpOnly(mnem),
+ .reg => |reg0| switch (ops[1]) {
+ .none => self.asmRegister(mnem, reg0),
+ .reg => |reg1| switch (ops[2]) {
+ .none => self.asmRegisterRegister(mnem, reg1, reg0),
+ .reg => |reg2| switch (ops[3]) {
+ .none => self.asmRegisterRegisterRegister(mnem, reg2, reg1, reg0),
else => error.InvalidInstruction,
},
- .mem => |mem1| switch (ops[2]) {
- .none => self.asmMemoryRegister(mnem, mem1, reg0),
+ .mem => |mem2| switch (ops[3]) {
+ .none => self.asmMemoryRegisterRegister(mnem, mem2, reg1, reg0),
else => error.InvalidInstruction,
},
else => error.InvalidInstruction,
},
- .mem => |mem0| switch (ops[1]) {
- .none => self.asmMemory(mnem, mem0),
- .reg => |reg1| switch (ops[2]) {
- .none => self.asmRegisterMemory(mnem, reg1, mem0),
- else => error.InvalidInstruction,
- },
+ .mem => |mem1| switch (ops[2]) {
+ .none => self.asmMemoryRegister(mnem, mem1, reg0),
else => error.InvalidInstruction,
},
- .imm => |imm0| switch (ops[1]) {
- .none => self.asmImmediate(mnem, imm0),
- .reg => |reg1| switch (ops[2]) {
- .none => self.asmRegisterImmediate(mnem, reg1, imm0),
- .reg => |reg2| switch (ops[3]) {
- .none => self.asmRegisterRegisterImmediate(mnem, reg2, reg1, imm0),
- else => error.InvalidInstruction,
- },
- .mem => |mem2| switch (ops[3]) {
- .none => self.asmMemoryRegisterImmediate(mnem, mem2, reg1, imm0),
- else => error.InvalidInstruction,
- },
+ else => error.InvalidInstruction,
+ },
+ .mem => |mem0| switch (ops[1]) {
+ .none => self.asmMemory(mnem, mem0),
+ .reg => |reg1| switch (ops[2]) {
+ .none => self.asmRegisterMemory(mnem, reg1, mem0),
+ else => error.InvalidInstruction,
+ },
+ else => error.InvalidInstruction,
+ },
+ .imm => |imm0| switch (ops[1]) {
+ .none => self.asmImmediate(mnem, imm0),
+ .reg => |reg1| switch (ops[2]) {
+ .none => self.asmRegisterImmediate(mnem, reg1, imm0),
+ .reg => |reg2| switch (ops[3]) {
+ .none => self.asmRegisterRegisterImmediate(mnem, reg2, reg1, imm0),
else => error.InvalidInstruction,
},
- .mem => |mem1| switch (ops[2]) {
- .none => self.asmMemoryImmediate(mnem, mem1, imm0),
+ .mem => |mem2| switch (ops[3]) {
+ .none => self.asmMemoryRegisterImmediate(mnem, mem2, reg1, imm0),
else => error.InvalidInstruction,
},
else => error.InvalidInstruction,
},
- }) catch |err| switch (err) {
- error.InvalidInstruction => return self.fail(
- "Invalid instruction: '{s} {s} {s} {s} {s}'",
- .{
- @tagName(mnem),
- @tagName(ops[0]),
- @tagName(ops[1]),
- @tagName(ops[2]),
- @tagName(ops[3]),
- },
- ),
- else => |e| return e,
- };
- }
+ .mem => |mem1| switch (ops[2]) {
+ .none => self.asmMemoryImmediate(mnem, mem1, imm0),
+ else => error.InvalidInstruction,
+ },
+ else => error.InvalidInstruction,
+ },
+ }) catch |err| switch (err) {
+ error.InvalidInstruction => return self.fail(
+ "Invalid instruction: '{s} {s} {s} {s} {s}'",
+ .{
+ @tagName(mnem),
+ @tagName(ops[0]),
+ @tagName(ops[1]),
+ @tagName(ops[2]),
+ @tagName(ops[3]),
+ },
+ ),
+ else => |e| return e,
+ };
}
simple: {
@@ -7052,25 +6788,10 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
return self.finishAir(inst, result, buf);
}
- var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
- for (outputs) |output| {
- if (output == .none) continue;
-
- bt.feed(output);
- }
- for (inputs) |input| {
- bt.feed(input);
- }
- return bt.finishAir(result);
-}
-
-fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
- try self.ensureProcessDeathCapacity(operand_count + 1);
- return BigTomb{
- .function = self,
- .inst = inst,
- .lbt = self.liveness.iterateBigTomb(inst),
- };
+ var bt = self.liveness.iterateBigTomb(inst);
+ for (outputs) |output| if (output != .none) self.feed(&bt, output);
+ for (inputs) |input| self.feed(&bt, input);
+ return self.finishAirResult(inst, result);
}
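
When the operand count exceeds what the small `finishAir` buffer covers, deaths are consumed from a big tomb, one `feed` per operand in operand order. A hedged sketch of that consumption model (illustrative bit layout, not `Liveness.BigTomb` itself):

const std = @import("std");

const ToyBigTomb = struct {
    bits: u32,
    index: u5 = 0,

    // One call per operand, in order; returns whether it dies here.
    fn feed(self: *ToyBigTomb) bool {
        const dies = (self.bits >> self.index) & 1 != 0;
        self.index += 1;
        return dies;
    }
};

test "deaths are consumed in operand order" {
    var bt = ToyBigTomb{ .bits = 0b101 }; // operands 0 and 2 die
    try std.testing.expect(bt.feed());
    try std.testing.expect(!bt.feed());
    try std.testing.expect(bt.feed());
}
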
/// Sets the value without any modifications to register allocation metadata or stack allocation metadata.
@@ -7952,7 +7673,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result = result: {
const src_mcv = try self.resolveInst(un_op);
if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv;
@@ -7966,7 +7687,7 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result = if (self.liveness.isUnused(inst)) .dead else result: {
+ const result = result: {
const operand = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand;
@@ -7991,28 +7712,24 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType();
const array_len = array_ty.arrayLen();
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
- const stack_offset = @intCast(i32, try self.allocMem(inst, 16, 16));
- try self.genSetStack(ptr_ty, stack_offset, ptr, .{});
- try self.genSetStack(Type.u64, stack_offset - 8, .{ .immediate = array_len }, .{});
- break :blk .{ .stack_offset = stack_offset };
- };
+
+ const stack_offset = @intCast(i32, try self.allocMem(inst, 16, 16));
+ try self.genSetStack(ptr_ty, stack_offset, ptr, .{});
+ try self.genSetStack(Type.u64, stack_offset - 8, .{ .immediate = array_len }, .{});
+
+ const result = MCValue{ .stack_offset = stack_offset };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
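
The two `genSetStack` calls build the slice in a 16-byte slot: the pointer at `stack_offset` and the `u64` length at `stack_offset - 8`, which is 8 bytes higher in memory since offsets are subtracted from rbp. A host-side check of the same `{ ptr, len }` pairing:

const std = @import("std");

test "a slice is a pointer paired with a length" {
    var array = [_]u8{ 1, 2, 3, 4 };
    const slice: []u8 = &array;
    try std.testing.expectEqual(2 * @sizeOf(usize), @sizeOf([]u8));
    try std.testing.expectEqual(@ptrToInt(&array), @ptrToInt(slice.ptr));
    try std.testing.expectEqual(array.len, slice.len);
}
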
fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst))
- .dead
- else
- return self.fail("TODO implement airIntToFloat for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ _ = ty_op;
+ return self.fail("TODO implement airIntToFloat for {}", .{self.target.cpu.arch});
+ //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst))
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
const src_ty = self.air.typeOf(ty_op.operand);
const dst_ty = self.air.typeOfIndex(inst);
@@ -8114,7 +7831,7 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
}
const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
+ if (self.liveness.isUnused(inst)) break :result .unreach;
if (val_abi_size <= 8) {
self.eflags_inst = inst;
@@ -8212,7 +7929,7 @@ fn atomicOp(
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
- return if (unused) .none else dst_mcv;
+ return if (unused) .unreach else dst_mcv;
},
.loop => _ = if (val_abi_size <= 8) {
const tmp_reg = try self.register_manager.allocReg(null, gp);
@@ -8285,7 +8002,7 @@ fn atomicOp(
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
_ = try self.asmJccReloc(loop, .ne);
- return if (unused) .none else .{ .register = .rax };
+ return if (unused) .unreach else .{ .register = .rax };
} else {
try self.asmRegisterMemory(.mov, .rax, Memory.sib(.qword, .{
.base = ptr_mem.sib.base,
@@ -8354,7 +8071,7 @@ fn atomicOp(
} });
_ = try self.asmJccReloc(loop, .ne);
- if (unused) return .none;
+ if (unused) return .unreach;
const dst_mcv = try self.allocTempRegOrMem(val_ty, false);
try self.asmMemoryRegister(
.mov,
@@ -8396,27 +8113,22 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
- const result: MCValue = result: {
- if (self.liveness.isUnused(inst)) break :result .dead;
-
- const ptr_ty = self.air.typeOf(atomic_load.ptr);
- const ptr_mcv = try self.resolveInst(atomic_load.ptr);
- const ptr_lock = switch (ptr_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
+ const ptr_ty = self.air.typeOf(atomic_load.ptr);
+ const ptr_mcv = try self.resolveInst(atomic_load.ptr);
+ const ptr_lock = switch (ptr_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv =
- if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
- ptr_mcv
- else
- try self.allocRegOrMem(inst, true);
+ const dst_mcv =
+ if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
+ ptr_mcv
+ else
+ try self.allocRegOrMem(inst, true);
- try self.load(dst_mcv, ptr_mcv, ptr_ty);
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ atomic_load.ptr, .none, .none });
+ try self.load(dst_mcv, ptr_mcv, ptr_ty);
+ return self.finishAir(inst, dst_mcv, .{ atomic_load.ptr, .none, .none });
}
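
The `ptr_lock`/`unlockReg` pairing keeps the pointer's register from being reallocated while the load is materialized, with `defer` guaranteeing release on every path. A stripped-down model of that lock discipline (toy manager, not the compiler's `RegisterManager`):

const std = @import("std");

const ToyRegManager = struct {
    locked: [4]bool = [1]bool{false} ** 4,

    const Lock = struct { index: u2 };

    fn lockAssumeUnused(self: *ToyRegManager, index: u2) Lock {
        std.debug.assert(!self.locked[index]);
        self.locked[index] = true;
        return .{ .index = index };
    }

    fn unlock(self: *ToyRegManager, lock: Lock) void {
        self.locked[lock.index] = false;
    }
};

test "defer keeps the lock balanced" {
    var rm = ToyRegManager{};
    {
        const lock = rm.lockAssumeUnused(2);
        defer rm.unlock(lock);
        try std.testing.expect(rm.locked[2]);
    }
    try std.testing.expect(!rm.locked[2]);
}
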
fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
@@ -8459,7 +8171,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
try self.genInlineMemset(dst_ptr, src_val, len, .{});
- return self.finishAir(inst, .none, .{ pl_op.operand, extra.lhs, extra.rhs });
+ return self.finishAir(inst, .unreach, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
@@ -8489,128 +8201,129 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
try self.genInlineMemcpy(dst_ptr, src_ptr, len, .{});
- return self.finishAir(inst, .none, .{ pl_op.operand, extra.lhs, extra.rhs });
+ return self.finishAir(inst, .unreach, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
- _ = operand;
- return self.fail("TODO implement airTagName for x86_64", .{});
- };
- return self.finishAir(inst, result, .{ un_op, .none, .none });
+ _ = operand;
+ return self.fail("TODO implement airTagName for x86_64", .{});
+ //return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const err_ty = self.air.typeOf(un_op);
- const err_mcv = try self.resolveInst(un_op);
- const err_reg = try self.copyToTmpRegister(err_ty, err_mcv);
- const err_lock = self.register_manager.lockRegAssumeUnused(err_reg);
- defer self.register_manager.unlockReg(err_lock);
-
- const addr_reg = try self.register_manager.allocReg(null, gp);
- const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
- defer self.register_manager.unlockReg(addr_lock);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
- try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
- .base = .ds,
- .disp = @intCast(i32, got_addr),
- }));
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom_index = try macho_file.getOrCreateAtomForLazySymbol(
- .{ .kind = .const_data, .ty = Type.anyerror },
- 4, // dword alignment
- );
- const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
- try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
- } else {
- return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)});
- }
-
- const start_reg = try self.register_manager.allocReg(null, gp);
- const start_lock = self.register_manager.lockRegAssumeUnused(start_reg);
- defer self.register_manager.unlockReg(start_lock);
- const end_reg = try self.register_manager.allocReg(null, gp);
- const end_lock = self.register_manager.lockRegAssumeUnused(end_reg);
- defer self.register_manager.unlockReg(end_lock);
+ const err_ty = self.air.typeOf(un_op);
+ const err_mcv = try self.resolveInst(un_op);
+ const err_reg = try self.copyToTmpRegister(err_ty, err_mcv);
+ const err_lock = self.register_manager.lockRegAssumeUnused(err_reg);
+ defer self.register_manager.unlockReg(err_lock);
- try self.truncateRegister(err_ty, err_reg.to32());
+ const addr_reg = try self.register_manager.allocReg(null, gp);
+ const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
+ defer self.register_manager.unlockReg(addr_lock);
- try self.asmRegisterMemory(.mov, start_reg.to32(), Memory.sib(.dword, .{
- .base = addr_reg.to64(),
- .scale_index = .{ .scale = 4, .index = err_reg.to64() },
- .disp = 4,
- }));
- try self.asmRegisterMemory(.mov, end_reg.to32(), Memory.sib(.dword, .{
- .base = addr_reg.to64(),
- .scale_index = .{ .scale = 4, .index = err_reg.to64() },
- .disp = 8,
- }));
- try self.asmRegisterRegister(.sub, end_reg.to32(), start_reg.to32());
- try self.asmRegisterMemory(.lea, start_reg.to64(), Memory.sib(.byte, .{
- .base = addr_reg.to64(),
- .scale_index = .{ .scale = 1, .index = start_reg.to64() },
- .disp = 0,
- }));
- try self.asmRegisterMemory(.lea, end_reg.to32(), Memory.sib(.byte, .{
- .base = end_reg.to64(),
- .disp = -1,
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
+ try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
+ .base = .ds,
+ .disp = @intCast(i32, got_addr),
}));
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
+ try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index = try macho_file.getOrCreateAtomForLazySymbol(
+ .{ .kind = .const_data, .ty = Type.anyerror },
+ 4, // dword alignment
+ );
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
+ try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index });
+ } else {
+ return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)});
+ }
- const dst_mcv = try self.allocRegOrMem(inst, false);
- try self.asmMemoryRegister(.mov, Memory.sib(.qword, .{
- .base = .rbp,
- .disp = 0 - dst_mcv.stack_offset,
- }), start_reg.to64());
- try self.asmMemoryRegister(.mov, Memory.sib(.qword, .{
- .base = .rbp,
- .disp = 8 - dst_mcv.stack_offset,
- }), end_reg.to64());
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ un_op, .none, .none });
+ const start_reg = try self.register_manager.allocReg(null, gp);
+ const start_lock = self.register_manager.lockRegAssumeUnused(start_reg);
+ defer self.register_manager.unlockReg(start_lock);
+
+ const end_reg = try self.register_manager.allocReg(null, gp);
+ const end_lock = self.register_manager.lockRegAssumeUnused(end_reg);
+ defer self.register_manager.unlockReg(end_lock);
+
+ try self.truncateRegister(err_ty, err_reg.to32());
+
+ try self.asmRegisterMemory(.mov, start_reg.to32(), Memory.sib(.dword, .{
+ .base = addr_reg.to64(),
+ .scale_index = .{ .scale = 4, .index = err_reg.to64() },
+ .disp = 4,
+ }));
+ try self.asmRegisterMemory(.mov, end_reg.to32(), Memory.sib(.dword, .{
+ .base = addr_reg.to64(),
+ .scale_index = .{ .scale = 4, .index = err_reg.to64() },
+ .disp = 8,
+ }));
+ try self.asmRegisterRegister(.sub, end_reg.to32(), start_reg.to32());
+ try self.asmRegisterMemory(.lea, start_reg.to64(), Memory.sib(.byte, .{
+ .base = addr_reg.to64(),
+ .scale_index = .{ .scale = 1, .index = start_reg.to64() },
+ .disp = 0,
+ }));
+ try self.asmRegisterMemory(.lea, end_reg.to32(), Memory.sib(.byte, .{
+ .base = end_reg.to64(),
+ .disp = -1,
+ }));
+
+ const dst_mcv = try self.allocRegOrMem(inst, false);
+ try self.asmMemoryRegister(.mov, Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = 0 - dst_mcv.stack_offset,
+ }), start_reg.to64());
+ try self.asmMemoryRegister(.mov, Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = 8 - dst_mcv.stack_offset,
+ }), end_reg.to64());
+
+ return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
}
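
The loads above walk a dword offset table for the lazy `anyerror` name data: indexed by the error value, the entries at displacements 4 and 8 give a name's start offset and one past its NUL, and the `lea`s turn those into a pointer/length pair. A host-side model with that layout inferred from the displacements (illustrative only, little-endian as on x86_64):

const std = @import("std");

fn lazyErrorName(table: []const u8, err_int: u32) []const u8 {
    const i = (err_int + 1) * 4;
    const start = std.mem.readIntLittle(u32, table[i..][0..4]);
    const end = std.mem.readIntLittle(u32, table[i + 4 ..][0..4]);
    return table[start .. end - 1]; // length is end - start - 1: the NUL is dropped
}

test "offset table yields the name bytes without the terminator" {
    const table = [_]u8{
        0,  0, 0, 0, // entry 0: unused in this toy table
        12, 0, 0, 0, // entry 1: start of the first name
        16, 0, 0, 0, // entry 2: one past its NUL
        'F', 'o', 'o', 0,
    };
    try std.testing.expectEqualStrings("Foo", lazyErrorName(&table, 0));
}
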
fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSplat for x86_64", .{});
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ _ = ty_op;
+ return self.fail("TODO implement airSplat for x86_64", .{});
+ //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSelect for x86_64", .{});
- return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
+ _ = extra;
+ return self.fail("TODO implement airSelect for x86_64", .{});
+ //return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for x86_64", .{});
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ _ = ty_op;
+ return self.fail("TODO implement airShuffle for x86_64", .{});
+ //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
const reduce = self.air.instructions.items(.data)[inst].reduce;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airReduce for x86_64", .{});
- return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
+ _ = reduce;
+ return self.fail("TODO implement airReduce for x86_64", .{});
+ //return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
@@ -8620,8 +8333,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const abi_size = @intCast(u32, result_ty.abiSize(self.target.*));
const abi_align = result_ty.abiAlignment(self.target.*);
- const result: MCValue = res: {
- if (self.liveness.isUnused(inst)) break :res MCValue.dead;
+ const result: MCValue = result: {
switch (result_ty.zigTypeTag()) {
.Struct => {
const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
@@ -8712,7 +8424,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
};
try self.genSetStack(elem_ty, stack_offset - elem_off, mat_elem_mcv, .{});
}
- break :res .{ .stack_offset = stack_offset };
+ break :result .{ .stack_offset = stack_offset };
},
.Array => {
const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
@@ -8728,7 +8440,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const elem_off = @intCast(i32, elem_size * elem_i);
try self.genSetStack(elem_ty, stack_offset - elem_off, mat_elem_mcv, .{});
}
- break :res MCValue{ .stack_offset = stack_offset };
+ break :result MCValue{ .stack_offset = stack_offset };
},
.Vector => return self.fail("TODO implement aggregate_init for vectors", .{}),
else => unreachable,
@@ -8740,82 +8452,70 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
std.mem.copy(Air.Inst.Ref, &buf, elements);
return self.finishAir(inst, result, buf);
}
- var bt = try self.iterateBigTomb(inst, elements.len);
- for (elements) |elem| {
- bt.feed(elem);
- }
- return bt.finishAir(result);
+ var bt = self.liveness.iterateBigTomb(inst);
+ for (elements) |elem| self.feed(&bt, elem);
+ return self.finishAirResult(inst, result);
}
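
Both the struct and array paths write element data at `stack_offset - elem_off`; since stack offsets are subtracted from rbp, decreasing offsets are increasing addresses, so elements land contiguously and in order. The arithmetic in isolation:

const std = @import("std");

test "decreasing stack offsets are increasing addresses" {
    const rbp: u64 = 0x7fff_0000;
    const stack_offset: u64 = 64;
    const elem_size: u64 = 8;
    var prev: u64 = 0;
    var elem_i: u64 = 0;
    while (elem_i < 4) : (elem_i += 1) {
        const elem_off = elem_size * elem_i;
        const addr = rbp - (stack_offset - elem_off);
        if (elem_i != 0) try std.testing.expectEqual(prev + elem_size, addr);
        prev = addr;
    }
}
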
fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
- const result: MCValue = res: {
- if (self.liveness.isUnused(inst)) break :res MCValue.dead;
- return self.fail("TODO implement airAggregateInit for x86_64", .{});
- };
- return self.finishAir(inst, result, .{ extra.init, .none, .none });
+ _ = extra;
+ return self.fail("TODO implement airAggregateInit for x86_64", .{});
+ //return self.finishAir(inst, result, .{ extra.init, .none, .none });
}
fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
const prefetch = self.air.instructions.items(.data)[inst].prefetch;
- return self.finishAir(inst, MCValue.dead, .{ prefetch.ptr, .none, .none });
+ return self.finishAir(inst, .unreach, .{ prefetch.ptr, .none, .none });
}
fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
- return self.fail("TODO implement airMulAdd for x86_64", .{});
- };
- return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
+ _ = extra;
+ return self.fail("TODO implement airMulAdd for x86_64", .{});
+ //return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
}
-fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
- // The first section of indices corresponds to a set number of constant values.
- const ref_int = @enumToInt(inst);
- if (ref_int < Air.Inst.Ref.typed_value_map.len) {
- const tv = Air.Inst.Ref.typed_value_map[ref_int];
- if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
- return .none;
- }
- return self.genTypedValue(tv);
- }
+fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
+ const ty = self.air.typeOf(ref);
// If the type has no codegen bits, no need to store it.
- const inst_ty = self.air.typeOf(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
- return .none;
-
- const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
- switch (self.air.instructions.items(.tag)[inst_index]) {
- .constant => {
- // Constants have static lifetimes, so they are always memoized in the outer most table.
- const branch = &self.branch_stack.items[0];
- const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
- if (!gop.found_existing) {
- const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
- gop.value_ptr.* = try self.genTypedValue(.{
- .ty = inst_ty,
- .val = self.air.values[ty_pl.payload],
- });
- }
- return gop.value_ptr.*;
- },
- .const_ty => unreachable,
- else => return self.getResolvedInstValue(inst_index).?.*,
+ if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isError()) return .none;
+
+ if (Air.refToIndex(ref)) |inst| {
+ const mcv = switch (self.air.instructions.items(.tag)[inst]) {
+ .constant => tracking: {
+ const gop = try self.const_tracking.getOrPut(self.gpa, inst);
+ if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{
+ .ty = ty,
+ .val = self.air.value(ref).?,
+ }));
+ break :tracking gop.value_ptr;
+ },
+ .const_ty => unreachable,
+ else => self.inst_tracking.getPtr(inst).?,
+ }.short;
+ switch (mcv) {
+ .none, .unreach, .dead => unreachable,
+ else => return mcv,
+ }
}
+
+ return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref).? });
}
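
Constants have static lifetimes, so the `.constant` path memoizes them: a getOrPut into `const_tracking` means only the first resolve pays for `genTypedValue`. The memoization pattern in isolation:

const std = @import("std");

test "getOrPut materializes each constant once" {
    var cache = std.AutoHashMap(u32, u64).init(std.testing.allocator);
    defer cache.deinit();

    var materializations: u32 = 0;
    for ([_]u32{ 5, 5, 5 }) |inst| {
        const gop = try cache.getOrPut(inst);
        if (!gop.found_existing) {
            materializations += 1; // stands in for genTypedValue
            gop.value_ptr.* = 0xabad1dea;
        }
    }
    try std.testing.expectEqual(@as(u32, 1), materializations);
}
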
-fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) ?*MCValue {
- // Treat each stack item as a "layer" on top of the previous one.
- var i: usize = self.branch_stack.items.len;
- while (true) {
- i -= 1;
- if (self.branch_stack.items[i].inst_table.getPtr(inst)) |mcv| {
- return if (mcv.* != .dead) mcv else null;
- }
- }
+fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking {
+ const tracking = switch (self.air.instructions.items(.tag)[inst]) {
+ .constant => &self.const_tracking,
+ .const_ty => unreachable,
+ else => &self.inst_tracking,
+ }.getPtr(inst).?;
+ return switch (tracking.short) {
+ .none, .unreach, .dead => unreachable,
+ else => tracking,
+ };
}
/// If the MCValue is an immediate, and it does not fit within this type,