author    Andrew Kelley <andrew@ziglang.org>  2022-07-01 15:52:54 -0700
committer Andrew Kelley <andrew@ziglang.org>  2022-07-01 15:52:54 -0700
commit    c89dd15e1be4959800dc7092d7dd4375253db7bc (patch)
tree      ca184ae53592efa21e67128a5f891d642d7f1118 /src/arch/arm/CodeGen.zig
parent    5466e87fce581f2ef90ac23bb80b1dbc05836fc6 (diff)
parent    2360f8c490f3ec684ed64ff28e8c1fade249070b (diff)
Merge remote-tracking branch 'origin/master' into llvm14
Diffstat (limited to 'src/arch/arm/CodeGen.zig')
-rw-r--r--  src/arch/arm/CodeGen.zig | 4313
1 file changed, 2929 insertions(+), 1384 deletions(-)
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index c7e80dbe24..b1b5c0fcb3 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -3,8 +3,8 @@ const builtin = @import("builtin");
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
+const codegen = @import("../../codegen.zig");
const Air = @import("../../Air.zig");
-const Zir = @import("../../Zir.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
@@ -22,21 +22,37 @@ const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
-const RegisterManager = @import("../../register_manager.zig").RegisterManager;
-const FnResult = @import("../../codegen.zig").FnResult;
-const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
-const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
+const FnResult = codegen.FnResult;
+const GenerateSymbolError = codegen.GenerateSymbolError;
+const DebugInfoOutput = codegen.DebugInfoOutput;
+
+const bits = @import("bits.zig");
+const abi = @import("abi.zig");
+const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
+const errUnionErrorOffset = codegen.errUnionErrorOffset;
+const RegisterManager = abi.RegisterManager;
+const RegisterLock = RegisterManager.RegisterLock;
+const Register = bits.Register;
+const Instruction = bits.Instruction;
+const Condition = bits.Condition;
+const callee_preserved_regs = abi.callee_preserved_regs;
+const caller_preserved_regs = abi.caller_preserved_regs;
+const c_abi_int_param_regs = abi.c_abi_int_param_regs;
+const c_abi_int_return_regs = abi.c_abi_int_return_regs;
+const gp = abi.RegisterClass.gp;
const InnerError = error{
OutOfMemory,
CodegenFail,
+ OutOfRegisters,
};
gpa: Allocator,
air: Air,
liveness: Liveness,
bin_file: *link.File,
+debug_output: DebugInfoOutput,
target: *const std.Target,
mod_fn: *const Module.Fn,
err_msg: ?*ErrorMsg,
@@ -61,6 +77,12 @@ end_di_column: u32,
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{},
+/// For every argument, we postpone the creation of debug info for
+/// later after all Mir instructions have been generated. Only then we
+/// will know saved_regs_stack_space which is necessary in order to
+/// address parameters passed on the stack.
+dbg_arg_relocs: std.ArrayListUnmanaged(DbgArgReloc) = .{},
+
/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
/// of the table of mappings from instructions to `MCValue` from within the branch.
@@ -73,11 +95,11 @@ branch_stack: *std.ArrayList(Branch),
// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
-register_manager: RegisterManager(Self, Register, &callee_preserved_regs) = .{},
+register_manager: RegisterManager = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
/// Tracks the current instruction allocated to the compare flags
-compare_flags_inst: ?Air.Inst.Index = null,
+cpsr_flags_inst: ?Air.Inst.Index = null,
/// Offset from the stack base, representing the end of the stack frame.
max_end_stack: u32 = 0,
@@ -93,9 +115,12 @@ air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init,
const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};
const MCValue = union(enum) {
- /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
- /// TODO Look into deleting this tag and using `dead` instead, since every use
- /// of MCValue.none should be instead looking at the type and noticing it is 0 bits.
+ /// No runtime bits. `void` types, empty structs, u0, enums with 1
+ /// tag, etc.
+ ///
+ /// TODO Look into deleting this tag and using `dead` instead,
+ /// since every use of MCValue.none should be instead looking at
+ /// the type and noticing it is 0 bits.
none,
/// Control flow will not allow this value to be observed.
unreach,
@@ -104,35 +129,50 @@ const MCValue = union(enum) {
/// The value is undefined.
undef,
/// A pointer-sized integer that fits in a register.
- /// If the type is a pointer, this is the pointer address in virtual address space.
+ ///
+ /// If the type is a pointer, this is the pointer address in
+ /// virtual address space.
immediate: u32,
- /// The constant was emitted into the code, at this offset.
- /// If the type is a pointer, it means the pointer address is embedded in the code.
- embedded_in_code: usize,
- /// The value is a pointer to a constant which was emitted into the code, at this offset.
- ptr_embedded_in_code: usize,
/// The value is in a target-specific register.
register: Register,
+ /// The value is a tuple { wrapped: u32, overflow: u1 } where
+ /// wrapped is stored in the register and the overflow bit is
+ /// stored in the C flag of the CPSR.
+ ///
+ /// This MCValue is only generated by a add_with_overflow or
+ /// sub_with_overflow instruction operating on u32.
+ register_c_flag: Register,
+ /// The value is a tuple { wrapped: i32, overflow: u1 } where
+ /// wrapped is stored in the register and the overflow bit is
+ /// stored in the V flag of the CPSR.
+ ///
+ /// This MCValue is only generated by a add_with_overflow or
+ /// sub_with_overflow instruction operating on i32.
+ register_v_flag: Register,
/// The value is in memory at a hard-coded address.
- /// If the type is a pointer, it means the pointer address is at this memory location.
+ ///
+ /// If the type is a pointer, it means the pointer address is at
+ /// this memory location.
memory: u64,
/// The value is one of the stack variables.
- /// If the type is a pointer, it means the pointer address is in the stack at this offset.
+ ///
+ /// If the type is a pointer, it means the pointer address is in
+ /// the stack at this offset.
stack_offset: u32,
- /// The value is a pointer to one of the stack variables (payload is stack offset).
+ /// The value is a pointer to one of the stack variables (payload
+ /// is stack offset).
ptr_stack_offset: u32,
- /// The value is in the compare flags assuming an unsigned operation,
- /// with this operator applied on top of it.
- compare_flags_unsigned: math.CompareOperator,
- /// The value is in the compare flags assuming a signed operation,
- /// with this operator applied on top of it.
- compare_flags_signed: math.CompareOperator,
+ /// The value resides in the N, Z, C, V flags of the Current
+ /// Program Status Register (CPSR). The value is 1 (if the type is
+ /// u1) or true (if the type in bool) iff the specified condition
+ /// is true.
+ cpsr_flags: Condition,
/// The value is a function argument passed via the stack.
stack_argument_offset: u32,
fn isMemory(mcv: MCValue) bool {
return switch (mcv) {
- .embedded_in_code, .memory, .stack_offset, .stack_argument_offset => true,
+ .memory, .stack_offset, .stack_argument_offset => true,
else => false,
};
}
@@ -151,12 +191,9 @@ const MCValue = union(enum) {
.dead => unreachable,
.immediate,
- .embedded_in_code,
.memory,
- .compare_flags_unsigned,
- .compare_flags_signed,
+ .cpsr_flags,
.ptr_stack_offset,
- .ptr_embedded_in_code,
.undef,
.stack_argument_offset,
=> false,
@@ -195,26 +232,12 @@ const BlockData = struct {
const BigTomb = struct {
function: *Self,
inst: Air.Inst.Index,
- tomb_bits: Liveness.Bpi,
- big_tomb_bits: u32,
- bit_index: usize,
+ lbt: Liveness.BigTomb,
fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
- const this_bit_index = bt.bit_index;
- bt.bit_index += 1;
-
- const op_int = @enumToInt(op_ref);
- if (op_int < Air.Inst.Ref.typed_value_map.len) return;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
-
- if (this_bit_index < Liveness.bpi - 1) {
- const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
- if (!dies) return;
- } else {
- const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1));
- const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0;
- if (!dies) return;
- }
+ const dies = bt.lbt.feed();
+ const op_index = Air.refToIndex(op_ref) orelse return;
+ if (!dies) return;
bt.function.processDeath(op_index);
}
@@ -229,6 +252,11 @@ const BigTomb = struct {
}
};
+const DbgArgReloc = struct {
+ inst: Air.Inst.Index,
+ index: u32,
+};
+
const Self = @This();
pub fn generate(
@@ -244,8 +272,10 @@ pub fn generate(
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
- assert(module_fn.owner_decl.has_tv);
- const fn_type = module_fn.owner_decl.ty;
+ const mod = bin_file.options.module.?;
+ const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
+ assert(fn_owner_decl.has_tv);
+ const fn_type = fn_owner_decl.ty;
var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
defer {
@@ -261,6 +291,7 @@ pub fn generate(
.liveness = liveness,
.target = &bin_file.options.target,
.bin_file = bin_file,
+ .debug_output = debug_output,
.mod_fn = module_fn,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
@@ -276,9 +307,13 @@ pub fn generate(
defer function.stack.deinit(bin_file.allocator);
defer function.blocks.deinit(bin_file.allocator);
defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
+ defer function.dbg_arg_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
defer call_info.deinit(&function);
@@ -290,9 +325,16 @@ pub fn generate(
function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return FnResult{
+ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ },
else => |e| return e,
};
+ for (function.dbg_arg_relocs.items) |reloc| {
+ try function.genArgDbgInfo(reloc.inst, reloc.index, call_info.stack_byte_count);
+ }
+
var mir = Mir{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = function.mir_extra.toOwnedSlice(bin_file.allocator),
@@ -302,7 +344,6 @@ pub fn generate(
var emit = Emit{
.mir = mir,
.bin_file = bin_file,
- .function = &function,
.debug_output = debug_output,
.target = &bin_file.options.target,
.src_loc = src_loc,
@@ -381,6 +422,18 @@ fn gen(self: *Self) !void {
// sub sp, sp, #reloc
const sub_reloc = try self.addNop();
+ if (self.ret_mcv == .stack_offset) {
+ // The address of where to store the return value is in
+ // r0. As this register might get overwritten along the
+ // way, save the address to the stack.
+ const stack_offset = mem.alignForwardGeneric(u32, self.next_stack_offset, 4) + 4;
+ self.next_stack_offset = stack_offset;
+ self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+
+ try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = .r0 });
+ self.ret_mcv = MCValue{ .stack_offset = stack_offset };
+ }
+
_ = try self.addInst(.{
.tag = .dbg_prologue_end,
.cond = undefined,
@@ -427,14 +480,17 @@ fn gen(self: *Self) !void {
});
// exitlude jumps
- if (self.exitlude_jump_relocs.items.len == 1) {
- // There is only one relocation. Hence,
- // this relocation must be at the end of
- // the code. Therefore, we can just delete
- // the space initially reserved for the
- // jump
- self.mir_instructions.len -= 1;
- } else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
+ if (self.exitlude_jump_relocs.items.len > 0 and
+ self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2)
+ {
+ // If the last Mir instruction (apart from the
+ // dbg_epilogue_begin) is the last exitlude jump
+ // relocation (which would just jump one instruction
+ // further), it can be safely removed
+ self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop());
+ }
+
+ for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.set(jmp_reloc, .{
.tag = .b,
.data = .{ .inst = @intCast(u32, self.mir_instructions.len) },
@@ -496,25 +552,55 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
- .add, .ptr_add => try self.airAdd(inst),
- .addwrap => try self.airAddWrap(inst),
+ .add, => try self.airBinOp(inst, .add),
+ .addwrap => try self.airBinOp(inst, .addwrap),
+ .sub, => try self.airBinOp(inst, .sub),
+ .subwrap => try self.airBinOp(inst, .subwrap),
+ .mul => try self.airBinOp(inst, .mul),
+ .mulwrap => try self.airBinOp(inst, .mulwrap),
+ .shl => try self.airBinOp(inst, .shl),
+ .shl_exact => try self.airBinOp(inst, .shl_exact),
+ .bool_and => try self.airBinOp(inst, .bool_and),
+ .bool_or => try self.airBinOp(inst, .bool_or),
+ .bit_and => try self.airBinOp(inst, .bit_and),
+ .bit_or => try self.airBinOp(inst, .bit_or),
+ .xor => try self.airBinOp(inst, .xor),
+ .shr => try self.airBinOp(inst, .shr),
+ .shr_exact => try self.airBinOp(inst, .shr_exact),
+
+ .ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
+ .ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),
+
+ .min => try self.airMinMax(inst),
+ .max => try self.airMinMax(inst),
+
.add_sat => try self.airAddSat(inst),
- .sub, .ptr_sub => try self.airSub(inst),
- .subwrap => try self.airSubWrap(inst),
.sub_sat => try self.airSubSat(inst),
- .mul => try self.airMul(inst),
- .mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
- .shl, .shl_exact => try self.airShl(inst),
.shl_sat => try self.airShlSat(inst),
- .min => try self.airMin(inst),
- .max => try self.airMax(inst),
.slice => try self.airSlice(inst),
- .add_with_overflow => try self.airAddWithOverflow(inst),
- .sub_with_overflow => try self.airSubWithOverflow(inst),
+ .sqrt,
+ .sin,
+ .cos,
+ .tan,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ .neg,
+ => try self.airUnaryMath(inst),
+
+ .add_with_overflow => try self.airOverflow(inst),
+ .sub_with_overflow => try self.airOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
.shl_with_overflow => try self.airShlWithOverflow(inst),
@@ -527,12 +613,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_gt => try self.airCmp(inst, .gt),
.cmp_neq => try self.airCmp(inst, .neq),
- .bool_and => try self.airBoolOp(inst),
- .bool_or => try self.airBoolOp(inst),
- .bit_and => try self.airBitAnd(inst),
- .bit_or => try self.airBitOr(inst),
- .xor => try self.airXor(inst),
- .shr, .shr_exact => try self.airShr(inst),
+ .cmp_vector => try self.airCmpVector(inst),
+ .cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
@@ -542,9 +624,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
- .ret_addr => try self.airRetAddr(),
+ .ret_addr => try self.airRetAddr(inst),
+ .frame_addr => try self.airFrameAddress(inst),
.fence => try self.airFence(),
- .call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
.dbg_stmt => try self.airDbgStmt(inst),
.fptrunc => try self.airFptrunc(inst),
@@ -583,11 +665,38 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.clz => try self.airClz(inst),
.ctz => try self.airCtz(inst),
.popcount => try self.airPopcount(inst),
+ .byte_swap => try self.airByteSwap(inst),
+ .bit_reverse => try self.airBitReverse(inst),
.tag_name => try self.airTagName(inst),
.error_name => try self.airErrorName(inst),
.splat => try self.airSplat(inst),
- .vector_init => try self.airVectorInit(inst),
+ .select => try self.airSelect(inst),
+ .shuffle => try self.airShuffle(inst),
+ .reduce => try self.airReduce(inst),
+ .aggregate_init => try self.airAggregateInit(inst),
+ .union_init => try self.airUnionInit(inst),
.prefetch => try self.airPrefetch(inst),
+ .mul_add => try self.airMulAdd(inst),
+
+ .@"try" => try self.airTry(inst),
+ .try_ptr => try self.airTryPtr(inst),
+
+ .dbg_var_ptr,
+ .dbg_var_val,
+ => try self.airDbgVar(inst),
+
+ .dbg_inline_begin,
+ .dbg_inline_end,
+ => try self.airDbgInline(inst),
+
+ .dbg_block_begin,
+ .dbg_block_end,
+ => try self.airDbgBlock(inst),
+
+ .call => try self.airCall(inst, .auto),
+ .call_always_tail => try self.airCall(inst, .always_tail),
+ .call_never_tail => try self.airCall(inst, .never_tail),
+ .call_never_inline => try self.airCall(inst, .never_inline),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
@@ -599,6 +708,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.struct_field_ptr_index_2 => try self.airStructFieldPtrIndex(inst, 2),
.struct_field_ptr_index_3 => try self.airStructFieldPtrIndex(inst, 3),
+ .field_parent_ptr => try self.airFieldParentPtr(inst),
+
.switch_br => try self.airSwitch(inst),
.slice_ptr => try self.airSlicePtr(inst),
.slice_len => try self.airSliceLen(inst),
@@ -623,14 +734,20 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.unwrap_errunion_payload => try self.airUnwrapErrPayload(inst),
.unwrap_errunion_err_ptr => try self.airUnwrapErrErrPtr(inst),
.unwrap_errunion_payload_ptr=> try self.airUnwrapErrPayloadPtr(inst),
+ .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst),
+ .err_return_trace => try self.airErrReturnTrace(inst),
+ .set_err_return_trace => try self.airSetErrReturnTrace(inst),
.wrap_optional => try self.airWrapOptional(inst),
.wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
+
+ .wasm_memory_size => unreachable,
+ .wasm_memory_grow => unreachable,
// zig fmt: on
}
- assert(!self.register_manager.frozenRegsExist());
+ assert(!self.register_manager.lockedRegsExist());
if (std.debug.runtime_safety) {
if (self.air_bookkeeping < old_air_bookkeeping + 1) {
@@ -640,11 +757,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
}
}
-fn writeInt(self: *Self, comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void {
- const endian = self.target.cpu.arch.endian();
- std.mem.writeInt(T, buf, value, endian);
-}
-
/// Asserts there is already capacity to insert into top branch inst_table.
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
const air_tags = self.air.instructions.items(.tag);
@@ -657,8 +769,14 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
.register => |reg| {
self.register_manager.freeReg(reg);
},
- .compare_flags_signed, .compare_flags_unsigned => {
- self.compare_flags_inst = null;
+ .register_c_flag,
+ .register_v_flag,
+ => |reg| {
+ self.register_manager.freeReg(reg);
+ self.cpsr_flags_inst = null;
+ },
+ .cpsr_flags => {
+ self.cpsr_flags_inst = null;
},
else => {}, // TODO process stack allocation death
}
@@ -699,6 +817,17 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
self.register_manager.getRegAssumeFree(reg, inst);
}
},
+ .register_c_flag,
+ .register_v_flag,
+ => |reg| {
+ if (self.register_manager.isRegFree(reg)) {
+ self.register_manager.getRegAssumeFree(reg, inst);
+ }
+ self.cpsr_flags_inst = inst;
+ },
+ .cpsr_flags => {
+ self.cpsr_flags_inst = inst;
+ },
else => {},
}
}
@@ -714,10 +843,9 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
if (abi_align > self.stack_align)
self.stack_align = abi_align;
// TODO find a free slot instead of always appending
- const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align);
- self.next_stack_offset = offset + abi_size;
- if (self.next_stack_offset > self.max_end_stack)
- self.max_end_stack = self.next_stack_offset;
+ const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
+ self.next_stack_offset = offset;
+ self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
try self.stack.putNoClobber(self.gpa, offset, .{
.inst = inst,
.size = abi_size,
@@ -728,8 +856,18 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const elem_ty = self.air.typeOfIndex(inst).elemType();
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
- return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
+
+ if (!elem_ty.hasRuntimeBits()) {
+ // As this stack item will never be dereferenced at runtime,
+ // return the stack offset 0. Stack offset 0 will be where all
+ // zero-sized stack allocations live as non-zero-sized
+ // allocations will always have an offset > 0.
+ return @as(u32, 0);
+ }
+
+ const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
+ const mod = self.bin_file.options.module.?;
+ return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
@@ -738,8 +876,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
- return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
+ const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
+ const mod = self.bin_file.options.module.?;
+ return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@@ -750,7 +889,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (abi_size <= ptr_bytes) {
- if (self.register_manager.tryAllocReg(inst)) |reg| {
+ if (self.register_manager.tryAllocReg(inst, gp)) |reg| {
return MCValue{ .register = reg };
}
}
@@ -762,8 +901,16 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
const stack_mcv = try self.allocRegOrMem(inst, false);
log.debug("spilling {} (%{d}) to stack mcv {any}", .{ reg, inst, stack_mcv });
+
const reg_mcv = self.getResolvedInstValue(inst);
- assert(reg == reg_mcv.register);
+ switch (reg_mcv) {
+ .register,
+ .register_c_flag,
+ .register_v_flag,
+ => |r| assert(r == reg),
+ else => unreachable, // not a register
+ }
+
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst, stack_mcv);
try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
@@ -772,18 +919,32 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
/// Save the current instruction stored in the compare flags if
/// occupied
fn spillCompareFlagsIfOccupied(self: *Self) !void {
- if (self.compare_flags_inst) |inst_to_save| {
+ if (self.cpsr_flags_inst) |inst_to_save| {
const mcv = self.getResolvedInstValue(inst_to_save);
- assert(mcv == .compare_flags_signed or mcv == .compare_flags_unsigned);
+ const new_mcv = switch (mcv) {
+ .cpsr_flags => try self.allocRegOrMem(inst_to_save, true),
+ .register_c_flag,
+ .register_v_flag,
+ => try self.allocRegOrMem(inst_to_save, false),
+ else => unreachable, // mcv doesn't occupy the compare flags
+ };
- const new_mcv = try self.allocRegOrMem(inst_to_save, true);
try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);
- self.compare_flags_inst = null;
+ self.cpsr_flags_inst = null;
+
+ // TODO consolidate with register manager and spillInstruction
+ // this call should really belong in the register manager!
+ switch (mcv) {
+ .register_c_flag,
+ .register_v_flag,
+ => |reg| self.register_manager.freeReg(reg),
+ else => {},
+ }
}
}
@@ -791,28 +952,38 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
- const reg = try self.register_manager.allocReg(null);
+ const reg = try self.register_manager.allocReg(null, gp);
try self.genSetReg(ty, reg, mcv);
return reg;
}
-/// Allocates a new register and copies `mcv` into it.
-/// `reg_owner` is the instruction that gets associated with the register in the register table.
-/// This can have a side effect of spilling instructions to the stack to free up a register.
-fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
- const reg = try self.register_manager.allocReg(reg_owner);
- try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
- return MCValue{ .register = reg };
-}
-
fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
const stack_offset = try self.allocMemPtr(inst);
return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none });
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
- const stack_offset = try self.allocMemPtr(inst);
- return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none });
+ const result: MCValue = switch (self.ret_mcv) {
+ .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
+ .stack_offset => blk: {
+ // self.ret_mcv is an address to where this function
+ // should store its result into
+ const ret_ty = self.fn_type.fnReturnType();
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = ret_ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+
+ // addr_reg will contain the address of where to store the
+ // result into
+ const addr_reg = try self.copyToTmpRegister(ptr_ty, self.ret_mcv);
+ break :blk .{ .register = addr_reg };
+ },
+ else => unreachable, // invalid return result
+ };
+
+ return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
@@ -832,30 +1003,119 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
+ const operand_ty = self.air.typeOf(ty_op.operand);
+ const dest_ty = self.air.typeOfIndex(inst);
+
+ const operand_abi_size = operand_ty.abiSize(self.target.*);
+ const dest_abi_size = dest_ty.abiSize(self.target.*);
const info_a = operand_ty.intInfo(self.target.*);
- const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*);
- if (info_a.signedness != info_b.signedness)
- return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
+ const info_b = dest_ty.intInfo(self.target.*);
- if (info_a.bits == info_b.bits)
- return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
+ const dst_mcv: MCValue = blk: {
+ if (info_a.bits == info_b.bits) {
+ break :blk operand;
+ }
+ if (operand_abi_size > 4 or dest_abi_size > 4) {
+ return self.fail("TODO implement intCast for abi sizes larger than 4", .{});
+ }
- return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch});
- // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const operand_lock: ?RegisterLock = switch (operand) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const reg = try self.register_manager.allocReg(inst, gp);
+ try self.genSetReg(dest_ty, reg, operand);
+ break :blk MCValue{ .register = reg };
+ };
+
+ return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
+}
+
+fn truncRegister(
+ self: *Self,
+ operand_reg: Register,
+ dest_reg: Register,
+ int_signedness: std.builtin.Signedness,
+ int_bits: u16,
+) !void {
+ // TODO check if sxtb/uxtb/sxth/uxth are more efficient
+ _ = try self.addInst(.{
+ .tag = switch (int_signedness) {
+ .signed => .sbfx,
+ .unsigned => .ubfx,
+ },
+ .data = .{ .rr_lsb_width = .{
+ .rd = dest_reg,
+ .rn = operand_reg,
+ .lsb = 0,
+ .width = @intCast(u6, int_bits),
+ } },
+ });
+}
+
+fn trunc(
+ self: *Self,
+ maybe_inst: ?Air.Inst.Index,
+ operand: MCValue,
+ operand_ty: Type,
+ dest_ty: Type,
+) !MCValue {
+ const info_a = operand_ty.intInfo(self.target.*);
+ const info_b = dest_ty.intInfo(self.target.*);
+
+ if (info_b.bits <= 32) {
+ const operand_reg = switch (operand) {
+ .register => |r| r,
+ else => operand_reg: {
+ if (info_a.bits <= 32) {
+ break :operand_reg try self.copyToTmpRegister(operand_ty, operand);
+ } else {
+ return self.fail("TODO load least significant word into register", .{});
+ }
+ },
+ };
+ const operand_reg_lock = self.register_manager.lockReg(operand_reg);
+ defer if (operand_reg_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const dest_reg = if (maybe_inst) |inst| blk: {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+
+ if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
+ break :blk operand_reg;
+ } else {
+ break :blk try self.register_manager.allocReg(inst, gp);
+ }
+ } else try self.register_manager.allocReg(null, gp);
+
+ switch (info_b.bits) {
+ 32 => {
+ try self.genSetReg(operand_ty, dest_reg, .{ .register = operand_reg });
+ return MCValue{ .register = dest_reg };
+ },
+ else => {
+ try self.truncRegister(operand_reg, dest_reg, info_b.signedness, info_b.bits);
+ return MCValue{ .register = dest_reg };
+ },
+ }
+ } else {
+ return self.fail("TODO: truncate to ints > 32 bits", .{});
+ }
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- if (self.liveness.isUnused(inst))
- return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
-
const operand = try self.resolveInst(ty_op.operand);
- _ = operand;
+ const operand_ty = self.air.typeOf(ty_op.operand);
+ const dest_ty = self.air.typeOfIndex(inst);
- return self.fail("TODO implement trunc for {}", .{self.target.cpu.arch});
- // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
+ break :blk try self.trunc(inst, operand, operand_ty, dest_ty);
+ };
+
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
@@ -869,71 +1129,253 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
+ const operand_ty = self.air.typeOf(ty_op.operand);
switch (operand) {
.dead => unreachable,
.unreach => unreachable,
- .compare_flags_unsigned => |op| {
- const r = MCValue{
- .compare_flags_unsigned = switch (op) {
- .gte => .lt,
- .gt => .lte,
- .neq => .eq,
- .lt => .gte,
- .lte => .gt,
- .eq => .neq,
+ .cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() },
+ else => {
+ switch (operand_ty.zigTypeTag()) {
+ .Bool => {
+ const op_reg = switch (operand) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(operand_ty, operand),
+ };
+ const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg);
+ defer self.register_manager.unlockReg(op_reg_lock);
+
+ const dest_reg = blk: {
+ if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
+ break :blk op_reg;
+ }
+
+ break :blk try self.register_manager.allocReg(null, gp);
+ };
+
+ _ = try self.addInst(.{
+ .tag = .eor,
+ .data = .{ .rr_op = .{
+ .rd = dest_reg,
+ .rn = op_reg,
+ .op = Instruction.Operand.fromU32(1).?,
+ } },
+ });
+
+ break :result MCValue{ .register = dest_reg };
},
- };
- break :result r;
- },
- .compare_flags_signed => |op| {
- const r = MCValue{
- .compare_flags_signed = switch (op) {
- .gte => .lt,
- .gt => .lte,
- .neq => .eq,
- .lt => .gte,
- .lte => .gt,
- .eq => .neq,
+ .Vector => return self.fail("TODO bitwise not for vectors", .{}),
+ .Int => {
+ const int_info = operand_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const op_reg = switch (operand) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(operand_ty, operand),
+ };
+ const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg);
+ defer self.register_manager.unlockReg(op_reg_lock);
+
+ const dest_reg = blk: {
+ if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
+ break :blk op_reg;
+ }
+
+ break :blk try self.register_manager.allocReg(null, gp);
+ };
+
+ _ = try self.addInst(.{
+ .tag = .mvn,
+ .data = .{ .rr_op = .{
+ .rd = dest_reg,
+ .rn = undefined,
+ .op = Instruction.Operand.reg(op_reg, Instruction.Operand.Shift.none),
+ } },
+ });
+
+ if (int_info.bits < 32) {
+ try self.truncRegister(dest_reg, dest_reg, int_info.signedness, int_info.bits);
+ }
+
+ break :result MCValue{ .register = dest_reg };
+ } else {
+ return self.fail("TODO ARM not on integers > u32/i32", .{});
+ }
},
- };
- break :result r;
- },
- else => {
- break :result try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not);
+ else => unreachable,
+ }
},
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airMin(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement min for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn minMax(
+ self: *Self,
+ tag: Air.Inst.Tag,
+ maybe_inst: ?Air.Inst.Index,
+ lhs: MCValue,
+ rhs: MCValue,
+ lhs_ty: Type,
+ rhs_ty: Type,
+) !MCValue {
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM min/max on floats", .{}),
+ .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const lhs_is_register = lhs == .register;
+ const rhs_is_register = rhs == .register;
+
+ const lhs_reg = switch (lhs) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(lhs_ty, lhs),
+ };
+ const lhs_reg_lock = self.register_manager.lockReg(lhs_reg);
+ defer if (lhs_reg_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const rhs_reg = switch (rhs) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(rhs_ty, rhs),
+ };
+ const rhs_reg_lock = self.register_manager.lockReg(rhs_reg);
+ defer if (rhs_reg_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const dest_reg = if (maybe_inst) |inst| blk: {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+
+ if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
+ break :blk lhs_reg;
+ } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
+ break :blk rhs_reg;
+ } else {
+ break :blk try self.register_manager.allocReg(inst, gp);
+ }
+ } else try self.register_manager.allocReg(null, gp);
+
+ // lhs == reg should have been checked by airMinMax
+ //
+ // By guaranteeing lhs != rhs, we guarantee (dst !=
+ // lhs) or (dst != rhs), which is a property we use to
+ // omit generating one instruction when we reuse a
+ // register.
+ assert(lhs_reg != rhs_reg); // see note above
+
+ _ = try self.binOpRegister(.cmp, .{ .register = lhs_reg }, .{ .register = rhs_reg }, lhs_ty, rhs_ty, null);
+
+ const cond_choose_lhs: Condition = switch (tag) {
+ .max => switch (int_info.signedness) {
+ .signed => Condition.gt,
+ .unsigned => Condition.hi,
+ },
+ .min => switch (int_info.signedness) {
+ .signed => Condition.lt,
+ .unsigned => Condition.cc,
+ },
+ else => unreachable,
+ };
+ const cond_choose_rhs = cond_choose_lhs.negate();
+
+ if (dest_reg != lhs_reg) {
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .cond = cond_choose_lhs,
+ .data = .{ .rr_op = .{
+ .rd = dest_reg,
+ .rn = .r0,
+ .op = Instruction.Operand.reg(lhs_reg, Instruction.Operand.Shift.none),
+ } },
+ });
+ }
+ if (dest_reg != rhs_reg) {
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .cond = cond_choose_rhs,
+ .data = .{ .rr_op = .{
+ .rd = dest_reg,
+ .rn = .r0,
+ .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none),
+ } },
+ });
+ }
+
+ return MCValue{ .register = dest_reg };
+ } else {
+ return self.fail("TODO ARM min/max on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
}
-fn airMax(self: *Self, inst: Air.Inst.Index) !void {
+fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
+ const tag = self.air.instructions.items(.tag)[inst];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement max for {}", .{self.target.cpu.arch});
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const lhs_ty = self.air.typeOf(bin_op.lhs);
+ const rhs_ty = self.air.typeOf(bin_op.rhs);
+
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ if (bin_op.lhs == bin_op.rhs) break :result lhs;
+
+ break :result try self.minMax(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const ptr = try self.resolveInst(bin_op.lhs);
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const len = try self.resolveInst(bin_op.rhs);
+ const len_ty = self.air.typeOf(bin_op.rhs);
+
+ const stack_offset = try self.allocMem(inst, 8, 4);
+ try self.genSetStack(ptr_ty, stack_offset, ptr);
+ try self.genSetStack(len_ty, stack_offset - 4, len);
+ break :result MCValue{ .stack_offset = stack_offset };
+ };
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airAdd(self: *Self, inst: Air.Inst.Index) !void {
+fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add);
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const lhs_ty = self.air.typeOf(bin_op.lhs);
+ const rhs_ty = self.air.typeOf(bin_op.rhs);
+
+ const result: MCValue = if (self.liveness.isUnused(inst))
+ .dead
+ else
+ try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
+ .lhs = bin_op.lhs,
+ .rhs = bin_op.rhs,
+ .inst = inst,
+ });
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch});
+fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const lhs_ty = self.air.typeOf(bin_op.lhs);
+ const rhs_ty = self.air.typeOf(bin_op.rhs);
+
+ const result: MCValue = if (self.liveness.isUnused(inst))
+ .dead
+ else
+ try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
+ .lhs = bin_op.lhs,
+ .rhs = bin_op.rhs,
+ .inst = inst,
+ });
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -943,60 +1385,348 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airSub(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airMul(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmMul(inst, bin_op.lhs, bin_op.rhs);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
- _ = inst;
- return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch});
-}
+fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
+ const tag = self.air.instructions.items(.tag)[inst];
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const lhs = try self.resolveInst(extra.lhs);
+ const rhs = try self.resolveInst(extra.rhs);
+ const lhs_ty = self.air.typeOf(extra.lhs);
+ const rhs_ty = self.air.typeOf(extra.rhs);
+
+ const tuple_ty = self.air.typeOfIndex(inst);
+ const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
+ const tuple_align = tuple_ty.abiAlignment(self.target.*);
+ const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits < 32) {
+ const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
+
+ try self.spillCompareFlagsIfOccupied();
+ self.cpsr_flags_inst = null;
+
+ const base_tag: Air.Inst.Tag = switch (tag) {
+ .add_with_overflow => .add,
+ .sub_with_overflow => .sub,
+ else => unreachable,
+ };
+ const dest = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
+ const dest_reg = dest.register;
+ const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+ defer self.register_manager.unlockReg(dest_reg_lock);
+
+ const truncated_reg = try self.register_manager.allocReg(null, gp);
+ const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
+ defer self.register_manager.unlockReg(truncated_reg_lock);
+
+ // sbfx/ubfx truncated, dest, #0, #bits
+ try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
+
+ // cmp dest, truncated
+ _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);
+
+ try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
+ try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
+
+ break :result MCValue{ .stack_offset = stack_offset };
+ } else if (int_info.bits == 32) {
+ // Only say yes if the operation is
+ // commutative, i.e. we can swap both of the
+ // operands
+ const lhs_immediate_ok = switch (tag) {
+ .add_with_overflow => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null,
+ .sub_with_overflow => false,
+ else => unreachable,
+ };
+ const rhs_immediate_ok = switch (tag) {
+ .add_with_overflow,
+ .sub_with_overflow,
+ => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null,
+ else => unreachable,
+ };
-fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
- _ = inst;
- return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch});
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .add_with_overflow => .adds,
+ .sub_with_overflow => .subs,
+ else => unreachable,
+ };
+
+ try self.spillCompareFlagsIfOccupied();
+ self.cpsr_flags_inst = inst;
+
+ const dest = blk: {
+ if (rhs_immediate_ok) {
+ break :blk try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, null);
+ } else if (lhs_immediate_ok) {
+ // swap lhs and rhs
+ break :blk try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, null);
+ } else {
+ break :blk try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, null);
+ }
+ };
+
+ if (tag == .sub_with_overflow) {
+ break :result MCValue{ .register_v_flag = dest.register };
+ }
+
+ switch (int_info.signedness) {
+ .unsigned => break :result MCValue{ .register_c_flag = dest.register },
+ .signed => break :result MCValue{ .register_v_flag = dest.register },
+ }
+ } else {
+ return self.fail("TODO ARM overflow operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
+ };
+ return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
- _ = inst;
- return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch});
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+ const result: MCValue = result: {
+ const lhs = try self.resolveInst(extra.lhs);
+ const rhs = try self.resolveInst(extra.rhs);
+ const lhs_ty = self.air.typeOf(extra.lhs);
+ const rhs_ty = self.air.typeOf(extra.rhs);
+
+ const tuple_ty = self.air.typeOfIndex(inst);
+ const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
+ const tuple_align = tuple_ty.abiAlignment(self.target.*);
+ const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 16) {
+ const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
+
+ try self.spillCompareFlagsIfOccupied();
+ self.cpsr_flags_inst = null;
+
+ const base_tag: Mir.Inst.Tag = switch (int_info.signedness) {
+ .signed => .smulbb,
+ .unsigned => .mul,
+ };
+
+ const dest = try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
+ const dest_reg = dest.register;
+ const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+ defer self.register_manager.unlockReg(dest_reg_lock);
+
+ const truncated_reg = try self.register_manager.allocReg(null, gp);
+ const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
+ defer self.register_manager.unlockReg(truncated_reg_lock);
+
+ // sbfx/ubfx truncated, dest, #0, #bits
+ try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
+
+ // cmp dest, truncated
+ _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);
+
+ try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
+ try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
+
+ break :result MCValue{ .stack_offset = stack_offset };
+ } else if (int_info.bits <= 32) {
+ const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
+
+ try self.spillCompareFlagsIfOccupied();
+ self.cpsr_flags_inst = null;
+
+ const base_tag: Mir.Inst.Tag = switch (int_info.signedness) {
+ .signed => .smull,
+ .unsigned => .umull,
+ };
+
+ // TODO extract umull etc. to binOpTwoRegister
+ // once MCValue.rr is implemented
+ const lhs_is_register = lhs == .register;
+ const rhs_is_register = rhs == .register;
+
+ const lhs_lock: ?RegisterLock = if (lhs_is_register)
+ self.register_manager.lockReg(lhs.register)
+ else
+ null;
+ defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const lhs_reg = if (lhs_is_register)
+ lhs.register
+ else
+ try self.register_manager.allocReg(null, gp);
+ const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+ defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const rhs_reg = if (rhs_is_register)
+ rhs.register
+ else
+ try self.register_manager.allocReg(null, gp);
+ const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
+ defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const dest_regs = try self.register_manager.allocRegs(2, .{ null, null }, gp);
+ const dest_regs_locks = self.register_manager.lockRegsAssumeUnused(2, dest_regs);
+ defer for (dest_regs_locks) |reg| {
+ self.register_manager.unlockReg(reg);
+ };
+ const rdlo = dest_regs[0];
+ const rdhi = dest_regs[1];
+
+ if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
+ if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
+
+ const truncated_reg = try self.register_manager.allocReg(null, gp);
+ const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
+ defer self.register_manager.unlockReg(truncated_reg_lock);
+
+ _ = try self.addInst(.{
+ .tag = base_tag,
+ .data = .{ .rrrr = .{
+ .rdlo = rdlo,
+ .rdhi = rdhi,
+ .rn = lhs_reg,
+ .rm = rhs_reg,
+ } },
+ });
+
+ // sbfx/ubfx truncated, rdlo, #0, #bits
+ try self.truncRegister(rdlo, truncated_reg, int_info.signedness, int_info.bits);
+
+ // str truncated, [...]
+ try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
+
+ // cmp truncated, rdlo
+ _ = try self.binOp(.cmp_eq, .{ .register = truncated_reg }, .{ .register = rdlo }, Type.usize, Type.usize, null);
+
+ // mov rdlo, #0
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .data = .{ .rr_op = .{
+ .rd = rdlo,
+ .rn = .r0,
+ .op = Instruction.Operand.fromU32(0).?,
+ } },
+ });
+
+ // movne rdlo, #1
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .cond = .ne,
+ .data = .{ .rr_op = .{
+ .rd = rdlo,
+ .rn = .r0,
+ .op = Instruction.Operand.fromU32(1).?,
+ } },
+ });
+
+ // cmp rdhi, #0
+ _ = try self.binOp(.cmp_eq, .{ .register = rdhi }, .{ .immediate = 0 }, Type.usize, Type.usize, null);
+
+ // movne rdlo, #1
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .cond = .ne,
+ .data = .{ .rr_op = .{
+ .rd = rdlo,
+ .rn = .r0,
+ .op = Instruction.Operand.fromU32(1).?,
+ } },
+ });
+
+ // strb rdlo, [...]
+ try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .register = rdlo });
+
+ break :result MCValue{ .stack_offset = stack_offset };
+ } else {
+ return self.fail("TODO ARM overflow operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
+ };
+ return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
- _ = inst;
- return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch});
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+ const result: MCValue = result: {
+ const lhs = try self.resolveInst(extra.lhs);
+ const rhs = try self.resolveInst(extra.rhs);
+ const lhs_ty = self.air.typeOf(extra.lhs);
+ const rhs_ty = self.air.typeOf(extra.rhs);
+
+ const tuple_ty = self.air.typeOfIndex(inst);
+ const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
+ const tuple_align = tuple_ty.abiAlignment(self.target.*);
+ const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
+ .Int => {
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
+
+ const lhs_lock: ?RegisterLock = if (lhs == .register)
+ self.register_manager.lockRegAssumeUnused(lhs.register)
+ else
+ null;
+ defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ try self.spillCompareFlagsIfOccupied();
+ self.cpsr_flags_inst = null;
+
+ // lsl dest, lhs, rhs
+ const dest = try self.binOp(.shl, lhs, rhs, lhs_ty, rhs_ty, null);
+ const dest_reg = dest.register;
+ const dest_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
+ defer self.register_manager.unlockReg(dest_lock);
+
+ // asr/lsr reconstructed, dest, rhs
+ const reconstructed = try self.binOp(.shr, dest, rhs, lhs_ty, rhs_ty, null);
+
+ // cmp lhs, reconstructed
+ _ = try self.binOp(.cmp_eq, lhs, reconstructed, lhs_ty, lhs_ty, null);
+
+ try self.genSetStack(lhs_ty, stack_offset, dest);
+ try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
+
+ break :result MCValue{ .stack_offset = stack_offset };
+ } else {
+ return self.fail("TODO ARM overflow operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
+ };
+ return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
@@ -1017,42 +1747,12 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-fn airBitOr(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-fn airXor(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-fn airShl(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .shl);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airShr(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .shr);
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch});
@@ -1071,27 +1771,92 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const optional_ty = self.air.typeOfIndex(inst);
+ const abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
+
+ // Optional with a zero-bit payload type is just a boolean true
+ if (abi_size == 1) {
+ break :result MCValue{ .immediate = 1 };
+ } else {
+ return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
+ }
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
+/// Given an error union, returns the error
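+/// The error value lives errUnionErrorOffset(payload_ty) bytes into the
+/// error union, which is why the offsets below are adjusted by that amount.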
+fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
+ const err_ty = error_union_ty.errorUnionSet();
+ const payload_ty = error_union_ty.errorUnionPayload();
+ if (err_ty.errorSetIsEmpty()) {
+ return MCValue{ .immediate = 0 };
+ }
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ return error_union_mcv;
+ }
+
+ const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+ switch (error_union_mcv) {
+ .register => return self.fail("TODO errUnionErr for registers", .{}),
+ .stack_argument_offset => |off| {
+ return MCValue{ .stack_argument_offset = off - err_offset };
+ },
+ .stack_offset => |off| {
+ return MCValue{ .stack_offset = off - err_offset };
+ },
+ .memory => |addr| {
+ return MCValue{ .memory = addr + err_offset };
+ },
+ else => unreachable, // invalid MCValue for an error union
+ }
+}
+
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.typeOf(ty_op.operand);
- const payload_ty = error_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBits()) break :result mcv;
-
- return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
+ break :result try self.errUnionErr(mcv, error_union_ty);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+/// Given an error union, returns the payload
+fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
+ const err_ty = error_union_ty.errorUnionSet();
+ const payload_ty = error_union_ty.errorUnionPayload();
+ if (err_ty.errorSetIsEmpty()) {
+ return error_union_mcv;
+ }
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ return MCValue.none;
+ }
+
+ const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
+ switch (error_union_mcv) {
+ .register => return self.fail("TODO errUnionPayload for registers", .{}),
+ .stack_argument_offset => |off| {
+ return MCValue{ .stack_argument_offset = off - payload_offset };
+ },
+ .stack_offset => |off| {
+ return MCValue{ .stack_offset = off - payload_offset };
+ },
+ .memory => |addr| {
+ return MCValue{ .memory = addr + payload_offset };
+ },
+ else => unreachable, // invalid MCValue for an error union
+ }
+}
+
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.typeOf(ty_op.operand);
- const payload_ty = error_union_ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBits()) break :result MCValue.none;
-
- return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
+ const error_union = try self.resolveInst(ty_op.operand);
+ break :result try self.errUnionPayload(error_union, error_union_ty);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1110,24 +1875,45 @@ fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const optional_ty = self.air.typeOfIndex(inst);
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .errunion_payload_ptr_set for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
- // Optional with a zero-bit payload type is just a boolean true
- if (optional_ty.abiSize(self.target.*) == 1)
- break :result MCValue{ .immediate = 1 };
+fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
+ _ = inst;
+ const result: MCValue = if (self.liveness.isUnused(inst))
+ .dead
+ else
+ return self.fail("TODO implement airErrReturnTrace for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ .none, .none, .none });
+}
- return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch});
}
/// T to E!T
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const error_union_ty = self.air.getRefType(ty_op.ty);
+ const payload_ty = error_union_ty.errorUnionPayload();
+ const operand = try self.resolveInst(ty_op.operand);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+
+ const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
+ const abi_align = error_union_ty.abiAlignment(self.target.*);
+ const stack_offset = @intCast(u32, try self.allocMem(inst, abi_size, abi_align));
+ const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
+ const err_off = errUnionErrorOffset(payload_ty, self.target.*);
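+ // An error value of 0 means success, so write the payload and a zero
+ // error code at their respective offsets within the error union.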
+ try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
+ try self.genSetStack(Type.anyerror, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
+
+ break :result MCValue{ .stack_offset = stack_offset };
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1137,32 +1923,44 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
- const mcv = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBits()) break :result mcv;
+ const operand = try self.resolveInst(ty_op.operand);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
- return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
+ const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
+ const abi_align = error_union_ty.abiAlignment(self.target.*);
+ const stack_offset = @intCast(u32, try self.allocMem(inst, abi_size, abi_align));
+ const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
+ const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+ try self.genSetStack(Type.anyerror, stack_offset - @intCast(u32, err_off), operand);
+ try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
+
+ break :result MCValue{ .stack_offset = stack_offset };
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+/// Given a slice, returns the pointer
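+/// The pointer is the first field of a slice, so the operand's offset or
+/// address can be returned unchanged; the length is handled separately in
+/// airSliceLen.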
+fn slicePtr(mcv: MCValue) MCValue {
+ switch (mcv) {
+ .register => unreachable, // a slice doesn't fit in one register
+ .stack_argument_offset => |off| {
+ return MCValue{ .stack_argument_offset = off };
+ },
+ .stack_offset => |off| {
+ return MCValue{ .stack_offset = off };
+ },
+ .memory => |addr| {
+ return MCValue{ .memory = addr };
+ },
+ else => unreachable, // invalid MCValue for a slice
+ }
+}
+
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(ty_op.operand);
- switch (mcv) {
- .dead, .unreach => unreachable,
- .register => unreachable, // a slice doesn't fit in one register
- .stack_argument_offset => |off| {
- break :result MCValue{ .stack_argument_offset = off };
- },
- .stack_offset => |off| {
- break :result MCValue{ .stack_offset = off };
- },
- .memory => |addr| {
- break :result MCValue{ .memory = addr };
- },
- else => return self.fail("TODO implement slice_ptr for {}", .{mcv}),
- }
+ break :result slicePtr(mcv);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1175,10 +1973,10 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
.dead, .unreach => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_argument_offset => |off| {
- break :result MCValue{ .stack_argument_offset = off + 4 };
+ break :result MCValue{ .stack_argument_offset = off - 4 };
},
.stack_offset => |off| {
- break :result MCValue{ .stack_offset = off + 4 };
+ break :result MCValue{ .stack_offset = off - 4 };
},
.memory => |addr| {
break :result MCValue{ .memory = addr + 4 };
@@ -1196,7 +1994,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
switch (mcv) {
.dead, .unreach => unreachable,
.ptr_stack_offset => |off| {
- break :result MCValue{ .ptr_stack_offset = off + 4 };
+ break :result MCValue{ .ptr_stack_offset = off - 4 };
},
else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}),
}
@@ -1222,85 +2020,81 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else result: {
+
+ if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+ const result: MCValue = result: {
const slice_mcv = try self.resolveInst(bin_op.lhs);
+ // TODO optimize for the case where the index is a constant,
+ // i.e. index_mcv == .immediate
+ const index_mcv = try self.resolveInst(bin_op.rhs);
+ const index_is_register = index_mcv == .register;
+
const slice_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = slice_ty.childType();
- const elem_size = elem_ty.abiSize(self.target.*);
+ const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
- // TODO optimize this for the case when elem_size is a power
- // of two (includes elem_size == 1)
- const offset_mcv = try self.genArmMulConstant(inst, bin_op.rhs, 1, @intCast(u32, elem_size));
- assert(offset_mcv == .register); // result of multiplication should always be register
- self.register_manager.freezeRegs(&.{offset_mcv.register});
- defer self.register_manager.unfreezeRegs(&.{offset_mcv.register});
+ const index_lock: ?RegisterLock = if (index_is_register)
+ self.register_manager.lockRegAssumeUnused(index_mcv.register)
+ else
+ null;
+ defer if (index_lock) |reg| self.register_manager.unlockReg(reg);
- const base_mcv: MCValue = switch (slice_mcv) {
- .stack_offset => .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, slice_mcv) },
- else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}),
- };
- self.register_manager.freezeRegs(&.{base_mcv.register});
- defer self.register_manager.unfreezeRegs(&.{base_mcv.register});
+ const base_mcv = slicePtr(slice_mcv);
- if (elem_size <= 4) {
- const dst_reg = try self.register_manager.allocReg(inst);
- self.register_manager.freezeRegs(&.{dst_reg});
- defer self.register_manager.unfreezeRegs(&.{dst_reg});
-
- switch (elem_size) {
- 1, 4 => {
- const tag: Mir.Inst.Tag = switch (elem_size) {
- 1 => .ldrb,
- 4 => .ldr,
- else => unreachable,
- };
+ switch (elem_size) {
+ 1, 4 => {
+ const base_reg = switch (base_mcv) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(slice_ptr_field_type, base_mcv),
+ };
+ const base_reg_lock = self.register_manager.lockRegAssumeUnused(base_reg);
+ defer self.register_manager.unlockReg(base_reg_lock);
- _ = try self.addInst(.{
- .tag = tag,
- .data = .{ .rr_offset = .{
- .rt = dst_reg,
- .rn = base_mcv.register,
- .offset = .{ .offset = Instruction.Offset.reg(offset_mcv.register, 0) },
- } },
- });
- },
- 2 => {
- _ = try self.addInst(.{
- .tag = .ldrh,
- .data = .{ .rr_extra_offset = .{
- .rt = dst_reg,
- .rn = base_mcv.register,
- .offset = .{ .offset = Instruction.ExtraLoadStoreOffset.reg(offset_mcv.register) },
- } },
- });
- },
- else => unreachable,
- }
+ const dst_reg = try self.register_manager.allocReg(inst, gp);
+ const dst_mcv = MCValue{ .register = dst_reg };
+ const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_reg_lock);
- break :result MCValue{ .register = dst_reg };
- } else {
- const dst_mcv = try self.allocRegOrMem(inst, false);
-
- const addr_reg = try self.register_manager.allocReg(null);
- self.register_manager.freezeRegs(&.{addr_reg});
- defer self.register_manager.unfreezeRegs(&.{addr_reg});
+ const index_reg: Register = switch (index_mcv) {
+ .register => |reg| reg,
+ else => try self.copyToTmpRegister(Type.usize, index_mcv),
+ };
+ const index_reg_lock = self.register_manager.lockReg(index_reg);
+ defer if (index_reg_lock) |lock| self.register_manager.unlockReg(lock);
- try self.genArmBinOpCode(addr_reg, base_mcv, offset_mcv, false, .add, .unsigned);
+ const tag: Mir.Inst.Tag = switch (elem_size) {
+ 1 => .ldrb,
+ 4 => .ldr,
+ else => unreachable,
+ };
+ const shift: u5 = switch (elem_size) {
+ 1 => 0,
+ 4 => 2,
+ else => unreachable,
+ };
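+ // The shifted register offset scales the index by the element size,
+ // e.g. ldr rt, [rn, rm, lsl #2] for 4-byte elements.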
- // I know we will unfreeze these registers at the end of
- // the scope of :result. However, at this point in time,
- // neither the base register nor the offset register
- // contains any valuable data anymore. In order to reduce
- // register pressure, unfreeze them prematurely
- self.register_manager.unfreezeRegs(&.{ base_mcv.register, offset_mcv.register });
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .rr_offset = .{
+ .rt = dst_reg,
+ .rn = base_reg,
+ .offset = .{ .offset = Instruction.Offset.reg(index_reg, .{ .lsl = shift }) },
+ } },
+ });
- try self.load(dst_mcv, .{ .register = addr_reg }, slice_ptr_field_type);
+ break :result dst_mcv;
+ },
+ else => {
+ const dest = try self.allocRegOrMem(inst, true);
+ const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null);
+ try self.load(dest, addr, slice_ptr_field_type);
- break :result dst_mcv;
+ break :result dest;
+ },
}
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1309,7 +2103,16 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const slice_mcv = try self.resolveInst(extra.lhs);
+ const index_mcv = try self.resolveInst(extra.rhs);
+ const base_mcv = slicePtr(slice_mcv);
+
+ const slice_ty = self.air.typeOf(extra.lhs);
+
+ const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ty, Type.usize, null);
+ break :result addr;
+ };
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
@@ -1329,7 +2132,15 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_elem_ptr for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const ptr_mcv = try self.resolveInst(extra.lhs);
+ const index_mcv = try self.resolveInst(extra.rhs);
+
+ const ptr_ty = self.air.typeOf(extra.lhs);
+
+ const addr = try self.binOp(.ptr_add, ptr_mcv, index_mcv, ptr_ty, Type.usize, null);
+ break :result addr;
+ };
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
@@ -1368,24 +2179,50 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
// return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ _ = ty_op;
+ return self.fail("TODO implement airByteSwap for {}", .{self.target.cpu.arch});
+ // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
+fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ _ = ty_op;
+ return self.fail("TODO implement airBitReverse for {}", .{self.target.cpu.arch});
+ // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
+fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const result: MCValue = if (self.liveness.isUnused(inst))
+ .dead
+ else
+ return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
+}
+
fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
switch (mcv) {
.register => |reg| {
- // If it's in the registers table, need to associate the register with the
- // new instruction.
- if (reg.allocIndex()) |index| {
- if (!self.register_manager.isRegFree(reg)) {
- self.register_manager.registers[index] = inst;
- }
+ // We assert that this register is allocatable by asking
+ // for its index
+ const index = RegisterManager.indexOfRegIntoTracked(reg).?; // see note above
+ if (!self.register_manager.isRegFree(reg)) {
+ self.register_manager.registers[index] = inst;
}
+
log.debug("%{d} => {} (reused)", .{ inst, reg });
},
.stack_offset => |off| {
log.debug("%{d} => stack offset {d} (reused)", .{ inst, off });
},
+ .cpsr_flags => {
+ log.debug("%{d} => cpsr_flags (reused)", .{inst});
+ },
else => return false,
}
@@ -1401,83 +2238,45 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const elem_ty = ptr_ty.elemType();
+ const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+
switch (ptr) {
.none => unreachable,
.undef => unreachable,
.unreach => unreachable,
.dead => unreachable,
- .compare_flags_unsigned => unreachable,
- .compare_flags_signed => unreachable,
+ .cpsr_flags,
+ .register_c_flag,
+ .register_v_flag,
+ => unreachable, // cannot hold an address
.immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
.ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
- .ptr_embedded_in_code => |off| {
- try self.setRegOrMem(elem_ty, dst_mcv, .{ .embedded_in_code = off });
- },
- .embedded_in_code => {
- return self.fail("TODO implement loading from MCValue.embedded_in_code", .{});
- },
.register => |reg| {
- self.register_manager.freezeRegs(&.{reg});
- defer self.register_manager.unfreezeRegs(&.{reg});
+ const reg_lock = self.register_manager.lockReg(reg);
+ defer if (reg_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
switch (dst_mcv) {
.dead => unreachable,
.undef => unreachable,
- .compare_flags_signed, .compare_flags_unsigned => unreachable,
- .embedded_in_code => unreachable,
+ .cpsr_flags => unreachable,
.register => |dst_reg| {
- _ = try self.addInst(.{
- .tag = .ldr,
- .data = .{ .rr_offset = .{
- .rt = dst_reg,
- .rn = reg,
- .offset = .{ .offset = Instruction.Offset.none },
- } },
- });
+ try self.genLdrRegister(dst_reg, reg, elem_ty);
},
.stack_offset => |off| {
- if (elem_ty.abiSize(self.target.*) <= 4) {
- const tmp_reg = try self.register_manager.allocReg(null);
- self.register_manager.freezeRegs(&.{tmp_reg});
- defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+ if (elem_size <= 4) {
+ const tmp_reg = try self.register_manager.allocReg(null, gp);
+ const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_reg_lock);
try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
- } else if (elem_ty.abiSize(self.target.*) == 8) {
- // TODO generalize this: maybe add a
- // genArmMemcpy function which manually copies
- // data if the size is below a certain
- // threshold and calls "memcpy" if the size is
- // larger
-
- const usize_ty = Type.initTag(.usize);
- const tmp_regs = try self.register_manager.allocRegs(2, .{ null, null });
- self.register_manager.freezeRegs(&tmp_regs);
- defer self.register_manager.unfreezeRegs(&tmp_regs);
-
- _ = try self.addInst(.{
- .tag = .ldr,
- .data = .{ .rr_offset = .{
- .rt = tmp_regs[0],
- .rn = reg,
- .offset = .{ .offset = Instruction.Offset.none },
- } },
- });
- _ = try self.addInst(.{
- .tag = .ldr,
- .data = .{ .rr_offset = .{
- .rt = tmp_regs[1],
- .rn = reg,
- .offset = .{ .offset = Instruction.Offset.imm(4) },
- } },
- });
- try self.genSetStack(usize_ty, off, MCValue{ .register = tmp_regs[0] });
- try self.genSetStack(usize_ty, off + 4, MCValue{ .register = tmp_regs[1] });
} else {
// TODO optimize the register allocation
- const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null });
- self.register_manager.freezeRegs(&regs);
- defer self.register_manager.unfreezeRegs(&regs);
+ const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
+ const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
+ defer for (regs_locks) |reg_locked| {
+ self.register_manager.unlockReg(reg_locked);
+ };
const src_reg = reg;
const dst_reg = regs[0];
@@ -1486,35 +2285,13 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
const tmp_reg = regs[3];
// sub dst_reg, fp, #off
- const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
- const adj_off = off + elem_size;
- const offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(adj_off)) |x| x else {
- return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
- };
- _ = try self.addInst(.{
- .tag = .sub,
- .data = .{ .rr_op = .{
- .rd = dst_reg,
- .rn = .fp,
- .op = offset_op,
- } },
- });
+ try self.genSetReg(ptr_ty, dst_reg, .{ .ptr_stack_offset = off });
// mov len, #elem_size
- const len_op: Instruction.Operand = if (Instruction.Operand.fromU32(elem_size)) |x| x else {
- return self.fail("TODO load: set reg to elem_size with all possible sizes", .{});
- };
- _ = try self.addInst(.{
- .tag = .mov,
- .data = .{ .rr_op = .{
- .rd = len_reg,
- .rn = .r0,
- .op = len_op,
- } },
- });
+ try self.genSetReg(Type.usize, len_reg, .{ .immediate = elem_size });
// memcpy(src, dst, len)
- try self.genArmInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
else => return self.fail("TODO load from register into {}", .{dst_mcv}),
@@ -1524,9 +2301,9 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.stack_offset,
.stack_argument_offset,
=> {
- const reg = try self.register_manager.allocReg(null);
- self.register_manager.freezeRegs(&.{reg});
- defer self.register_manager.unfreezeRegs(&.{reg});
+ const reg = try self.register_manager.allocReg(null, gp);
+ const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
+ defer self.register_manager.unlockReg(reg_lock);
try self.genSetReg(ptr_ty, reg, ptr);
try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
@@ -1561,50 +2338,77 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
}
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
+ const elem_size = @intCast(u32, value_ty.abiSize(self.target.*));
+
switch (ptr) {
.none => unreachable,
.undef => unreachable,
.unreach => unreachable,
.dead => unreachable,
- .compare_flags_unsigned => unreachable,
- .compare_flags_signed => unreachable,
+ .cpsr_flags,
+ .register_c_flag,
+ .register_v_flag,
+ => unreachable, // cannot hold an address
.immediate => |imm| {
try self.setRegOrMem(value_ty, .{ .memory = imm }, value);
},
.ptr_stack_offset => |off| {
try self.genSetStack(value_ty, off, value);
},
- .ptr_embedded_in_code => |off| {
- try self.setRegOrMem(value_ty, .{ .embedded_in_code = off }, value);
- },
- .embedded_in_code => {
- return self.fail("TODO implement storing to MCValue.embedded_in_code", .{});
- },
.register => |addr_reg| {
- self.register_manager.freezeRegs(&.{addr_reg});
- defer self.register_manager.unfreezeRegs(&.{addr_reg});
+ const addr_reg_lock = self.register_manager.lockReg(addr_reg);
+ defer if (addr_reg_lock) |reg| self.register_manager.unlockReg(reg);
switch (value) {
+ .dead => unreachable,
+ .undef => unreachable,
.register => |value_reg| {
- _ = try self.addInst(.{
- .tag = .str,
- .data = .{ .rr_offset = .{
- .rt = value_reg,
- .rn = addr_reg,
- .offset = .{ .offset = Instruction.Offset.none },
- } },
- });
+ try self.genStrRegister(value_reg, addr_reg, value_ty);
},
else => {
- if (value_ty.abiSize(self.target.*) <= 4) {
- const tmp_reg = try self.register_manager.allocReg(null);
- self.register_manager.freezeRegs(&.{tmp_reg});
- defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+ if (elem_size <= 4) {
+ const tmp_reg = try self.register_manager.allocReg(null, gp);
+ const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_reg_lock);
try self.genSetReg(value_ty, tmp_reg, value);
try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
} else {
- return self.fail("TODO implement memcpy", .{});
+ const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
+ const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
+ defer for (regs_locks) |reg| {
+ self.register_manager.unlockReg(reg);
+ };
+
+ const src_reg = regs[0];
+ const dst_reg = addr_reg;
+ const len_reg = regs[1];
+ const count_reg = regs[2];
+ const tmp_reg = regs[3];
+
+ switch (value) {
+ .stack_offset => |off| {
+ // sub src_reg, fp, #off
+ try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
+ },
+ .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .stack_argument_offset => |off| {
+ _ = try self.addInst(.{
+ .tag = .ldr_ptr_stack_argument,
+ .data = .{ .r_stack_offset = .{
+ .rt = src_reg,
+ .stack_offset = off,
+ } },
+ });
+ },
+ else => return self.fail("TODO store {} to register", .{value}),
+ }
+
+ // mov len, #elem_size
+ try self.genSetReg(Type.usize, len_reg, .{ .immediate = elem_size });
+
+ // memcpy(src, dst, len)
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
}
@@ -1647,16 +2451,35 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(operand);
- const struct_ty = self.air.typeOf(operand).childType();
- const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
+ const ptr_ty = self.air.typeOf(operand);
+ const struct_ty = ptr_ty.childType();
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
- const struct_field_ty = struct_ty.structFieldType(index);
- const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
switch (mcv) {
.ptr_stack_offset => |off| {
- break :result MCValue{ .ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size };
+ break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
+ },
+ else => {
+ const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
+ .immediate = struct_field_offset,
+ });
+ const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+ defer self.register_manager.unlockReg(offset_reg_lock);
+
+ const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv);
+ const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
+ defer self.register_manager.unlockReg(addr_reg_lock);
+
+ const dest = try self.binOp(
+ .add,
+ .{ .register = addr_reg },
+ .{ .register = offset_reg },
+ Type.usize,
+ Type.usize,
+ null,
+ );
+
+ break :result dest;
},
- else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}),
}
};
}
@@ -1669,13 +2492,48 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(operand);
const struct_ty = self.air.typeOf(operand);
- const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
- const struct_field_ty = struct_ty.structFieldType(index);
- const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
+
switch (mcv) {
+ .dead, .unreach => unreachable,
+ .stack_argument_offset => |off| {
+ break :result MCValue{ .stack_argument_offset = off - struct_field_offset };
+ },
.stack_offset => |off| {
- break :result MCValue{ .stack_offset = off + struct_size - struct_field_offset - struct_field_size };
+ break :result MCValue{ .stack_offset = off - struct_field_offset };
+ },
+ .memory => |addr| {
+ break :result MCValue{ .memory = addr + struct_field_offset };
+ },
+ .register_c_flag,
+ .register_v_flag,
+ => |reg| {
+ const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
+ defer self.register_manager.unlockReg(reg_lock);
+
+ const field: MCValue = switch (index) {
+ // get wrapped value: return register
+ 0 => MCValue{ .register = reg },
+
+ // get overflow bit: return C or V flag
+ 1 => MCValue{ .cpsr_flags = switch (mcv) {
+ .register_c_flag => .cs,
+ .register_v_flag => .vs,
+ else => unreachable,
+ } },
+
+ else => unreachable,
+ };
+
+ if (self.reuseOperand(inst, operand, 0, field)) {
+ break :result field;
+ } else {
+ // Copy to new register
+ const dest_reg = try self.register_manager.allocReg(null, gp);
+ try self.genSetReg(struct_ty.structFieldType(index), dest_reg, field);
+
+ break :result MCValue{ .register = dest_reg };
+ }
},
else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}),
}
@@ -1684,413 +2542,606 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
-fn armOperandShouldBeRegister(self: *Self, mcv: MCValue) !bool {
- return switch (mcv) {
- .none => unreachable,
- .undef => unreachable,
- .dead, .unreach => unreachable,
- .compare_flags_unsigned => unreachable,
- .compare_flags_signed => unreachable,
- .ptr_stack_offset => unreachable,
- .ptr_embedded_in_code => unreachable,
- .immediate => |imm| blk: {
- if (imm > std.math.maxInt(u32)) return self.fail("TODO ARM binary arithmetic immediate larger than u32", .{});
-
- // Load immediate into register if it doesn't fit
- // in an operand
- break :blk Instruction.Operand.fromU32(@intCast(u32, imm)) == null;
- },
- .register => true,
- .stack_offset,
- .stack_argument_offset,
- .embedded_in_code,
- .memory,
- => true,
- };
+fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFieldParentPtr", .{});
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: Air.Inst.Tag) !MCValue {
- // In the case of bitshifts, the type of rhs is different
- // from the resulting type
- const ty = self.air.typeOf(op_lhs);
+/// Allocates a new register. If track_inst is non-null, additionally
+/// tracks this register and the corresponding inst and removes all
+/// previous tracking. Does not do the actual moving (that is handled
+/// by genSetReg).
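+///
+/// For example, if the value currently lives in the CPSR flags, the
+/// tracked cpsr_flags_inst is cleared; if it lives in another register,
+/// that register is freed, so no stale MCValue stays marked as occupied.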
+fn prepareNewRegForMoving(
+ self: *Self,
+ track_inst: ?Air.Inst.Index,
+ register_class: RegisterManager.RegisterBitSet,
+ mcv: MCValue,
+) !Register {
+ const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+ const reg = try self.register_manager.allocReg(track_inst, register_class);
+
+ if (track_inst) |inst| {
+ // Overwrite the MCValue associated with this inst
+ branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
- switch (ty.zigTypeTag()) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Bool => {
- return self.genArmBinIntOp(inst, op_lhs, op_rhs, op, 1, .unsigned);
- },
- .Int => {
- const int_info = ty.intInfo(self.target.*);
- return self.genArmBinIntOp(inst, op_lhs, op_rhs, op, int_info.bits, int_info.signedness);
- },
- else => unreachable,
+ // If the previous MCValue occupied some space we track, we
+ // need to make sure it is marked as free now.
+ switch (mcv) {
+ .cpsr_flags => {
+ assert(self.cpsr_flags_inst.? == inst);
+ self.cpsr_flags_inst = null;
+ },
+ .register => |prev_reg| {
+ assert(!self.register_manager.isRegFree(prev_reg));
+ self.register_manager.freeReg(prev_reg);
+ },
+ else => {},
+ }
}
+
+ return reg;
}
-fn genArmBinIntOp(
+/// Don't call this function directly. Use binOp instead.
+///
+/// Calling this function signals an intention to generate a Mir
+/// instruction of the form
+///
+/// op dest, lhs, rhs
+///
+/// Asserts that generating an instruction of that form is possible.
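+///
+/// For example, lowering a 32-bit `add` through this path produces
+///
+///   add rd, rn, rm
+///
+/// where any operand not already in a register is first moved into a
+/// freshly allocated one via genSetReg.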
+fn binOpRegister(
self: *Self,
- inst: Air.Inst.Index,
- op_lhs: Air.Inst.Ref,
- op_rhs: Air.Inst.Ref,
- op: Air.Inst.Tag,
- bits: u16,
- signedness: std.builtin.Signedness,
+ mir_tag: Mir.Inst.Tag,
+ lhs: MCValue,
+ rhs: MCValue,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ metadata: ?BinOpMetadata,
) !MCValue {
- if (bits > 32) {
- return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
- }
-
- const lhs = try self.resolveInst(op_lhs);
- const rhs = try self.resolveInst(op_rhs);
-
const lhs_is_register = lhs == .register;
const rhs_is_register = rhs == .register;
- const lhs_should_be_register = switch (op) {
- .shr, .shl => true,
- else => try self.armOperandShouldBeRegister(lhs),
- };
- const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs);
- const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs);
- const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs);
- const can_swap_lhs_and_rhs = switch (op) {
- .shr, .shl => false,
- else => true,
- };
-
- if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
- defer if (lhs_is_register) self.register_manager.unfreezeRegs(&.{lhs.register});
- if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
- defer if (rhs_is_register) self.register_manager.unfreezeRegs(&.{rhs.register});
-
- // Destination must be a register
- var dst_mcv: MCValue = undefined;
- var lhs_mcv = lhs;
- var rhs_mcv = rhs;
- var swap_lhs_and_rhs = false;
-
- // Allocate registers for operands and/or destination
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- if (reuse_lhs) {
- // Allocate 0 or 1 registers
- if (!rhs_is_register and rhs_should_be_register) {
- rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?) };
- branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
- }
- dst_mcv = lhs;
- } else if (reuse_rhs and can_swap_lhs_and_rhs) {
- // Allocate 0 or 1 registers
- if (!lhs_is_register and lhs_should_be_register) {
- lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?) };
- branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv);
- }
- dst_mcv = rhs;
- swap_lhs_and_rhs = true;
- } else {
- // Allocate 1 or 2 registers
- if (lhs_should_be_register and rhs_should_be_register) {
- if (lhs_is_register and rhs_is_register) {
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
- } else if (lhs_is_register) {
- // Move RHS to register
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
- rhs_mcv = dst_mcv;
- } else if (rhs_is_register) {
- // Move LHS to register
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
- lhs_mcv = dst_mcv;
- } else {
- // Move LHS and RHS to register
- const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? });
- lhs_mcv = MCValue{ .register = regs[0] };
- rhs_mcv = MCValue{ .register = regs[1] };
- dst_mcv = lhs_mcv;
+ const lhs_lock: ?RegisterLock = if (lhs_is_register)
+ self.register_manager.lockReg(lhs.register)
+ else
+ null;
+ defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
- branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
- }
- } else if (lhs_should_be_register) {
- // RHS is immediate
- if (lhs_is_register) {
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
+ const lhs_reg = if (lhs_is_register) lhs.register else blk: {
+ const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
+ break :inst Air.refToIndex(md.lhs).?;
+ } else null;
+
+ break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs);
+ };
+ const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+ defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const rhs_reg = if (rhs_is_register) rhs.register else blk: {
+ const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
+ break :inst Air.refToIndex(md.rhs).?;
+ } else null;
+
+ break :blk try self.prepareNewRegForMoving(track_inst, gp, rhs);
+ };
+ const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
+ defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const dest_reg = switch (mir_tag) {
+ .cmp => .r0, // cmp has no destination regardless
+ else => if (metadata) |md| blk: {
+ if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) {
+ break :blk lhs_reg;
+ } else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
+ break :blk rhs_reg;
} else {
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
- lhs_mcv = dst_mcv;
+ break :blk try self.register_manager.allocReg(md.inst, gp);
}
- } else if (rhs_should_be_register and can_swap_lhs_and_rhs) {
- // LHS is immediate
- if (rhs_is_register) {
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
+ } else try self.register_manager.allocReg(null, gp),
+ };
+
+ if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
+ if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
+
+ const mir_data: Mir.Inst.Data = switch (mir_tag) {
+ .add,
+ .adds,
+ .sub,
+ .subs,
+ .cmp,
+ .@"and",
+ .orr,
+ .eor,
+ => .{ .rr_op = .{
+ .rd = dest_reg,
+ .rn = lhs_reg,
+ .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none),
+ } },
+ .lsl,
+ .asr,
+ .lsr,
+ => .{ .rr_shift = .{
+ .rd = dest_reg,
+ .rm = lhs_reg,
+ .shift_amount = Instruction.ShiftAmount.reg(rhs_reg),
+ } },
+ .mul,
+ .smulbb,
+ => .{ .rrr = .{
+ .rd = dest_reg,
+ .rn = lhs_reg,
+ .rm = rhs_reg,
+ } },
+ else => unreachable,
+ };
+
+ _ = try self.addInst(.{
+ .tag = mir_tag,
+ .data = mir_data,
+ });
+
+ return MCValue{ .register = dest_reg };
+}
+
+/// Don't call this function directly. Use binOp instead.
+///
+/// Calling this function signals an intention to generate a Mir
+/// instruction of the form
+///
+/// op dest, lhs, #rhs_imm
+///
+/// Set lhs_and_rhs_swapped to true iff metadata.lhs corresponds to
+/// rhs and vice versa. This parameter is only relevant when metadata
+/// is non-null.
+///
+/// Asserts that generating an instruction of that form is possible.
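+///
+/// For example, lowering `x + 1` with a constant rhs produces
+///
+///   add rd, rn, #1
+///
+/// provided the immediate is encodable as an ARM operand.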
+fn binOpImmediate(
+ self: *Self,
+ mir_tag: Mir.Inst.Tag,
+ lhs: MCValue,
+ rhs: MCValue,
+ lhs_ty: Type,
+ lhs_and_rhs_swapped: bool,
+ metadata: ?BinOpMetadata,
+) !MCValue {
+ const lhs_is_register = lhs == .register;
+
+ const lhs_lock: ?RegisterLock = if (lhs_is_register)
+ self.register_manager.lockReg(lhs.register)
+ else
+ null;
+ defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const lhs_reg = if (lhs_is_register) lhs.register else blk: {
+ const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
+ break :inst Air.refToIndex(
+ if (lhs_and_rhs_swapped) md.rhs else md.lhs,
+ ).?;
+ } else null;
+
+ break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs);
+ };
+ const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+ defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const dest_reg = switch (mir_tag) {
+ .cmp => .r0, // cmp has no destination reg
+ else => if (metadata) |md| blk: {
+ if (lhs_is_register and self.reuseOperand(
+ md.inst,
+ if (lhs_and_rhs_swapped) md.rhs else md.lhs,
+ if (lhs_and_rhs_swapped) 1 else 0,
+ lhs,
+ )) {
+ break :blk lhs_reg;
} else {
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
- rhs_mcv = dst_mcv;
+ break :blk try self.register_manager.allocReg(md.inst, gp);
}
+ } else try self.register_manager.allocReg(null, gp),
+ };
- swap_lhs_and_rhs = true;
- } else unreachable; // binary operation on two immediates
- }
+ if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
- // Move the operands to the newly allocated registers
- if (lhs_mcv == .register and !lhs_is_register) {
- try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs);
- }
- if (rhs_mcv == .register and !rhs_is_register) {
- try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs);
- }
+ const mir_data: Mir.Inst.Data = switch (mir_tag) {
+ .add,
+ .adds,
+ .sub,
+ .subs,
+ .cmp,
+ .@"and",
+ .orr,
+ .eor,
+ => .{ .rr_op = .{
+ .rd = dest_reg,
+ .rn = lhs_reg,
+ .op = Instruction.Operand.fromU32(rhs.immediate).?,
+ } },
+ .lsl,
+ .asr,
+ .lsr,
+ => .{ .rr_shift = .{
+ .rd = dest_reg,
+ .rm = lhs_reg,
+ .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs.immediate)),
+ } },
+ else => unreachable,
+ };
- try self.genArmBinOpCode(
- dst_mcv.register,
- lhs_mcv,
- rhs_mcv,
- swap_lhs_and_rhs,
- op,
- signedness,
- );
- return dst_mcv;
+ _ = try self.addInst(.{
+ .tag = mir_tag,
+ .data = mir_data,
+ });
+
+ return MCValue{ .register = dest_reg };
}
-fn genArmBinOpCode(
+const BinOpMetadata = struct {
+ inst: Air.Inst.Index,
+ lhs: Air.Inst.Ref,
+ rhs: Air.Inst.Ref,
+};
+
+/// For all your binary operation needs, this function will generate
+/// the corresponding Mir instruction(s). Returns the location of the
+/// result.
+///
+/// If the binary operation itself happens to be an Air instruction,
+/// pass the corresponding index via the metadata parameter. That lets
+/// this function reuse operands where possible.
+///
+/// This function does not do any lowering to Mir itself, but instead
+/// looks at the lhs and rhs and determines which kind of lowering
+/// would be best suitable and then delegates the lowering to other
+/// functions.
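+///
+/// For example, binOp(.add, lhs, rhs, Type.usize, Type.usize, null) picks
+/// binOpImmediate when either operand is an encodable immediate (swapping
+/// the operands if needed) and falls back to binOpRegister otherwise.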
+fn binOp(
self: *Self,
- dst_reg: Register,
- lhs_mcv: MCValue,
- rhs_mcv: MCValue,
- swap_lhs_and_rhs: bool,
- op: Air.Inst.Tag,
- signedness: std.builtin.Signedness,
-) !void {
- assert(lhs_mcv == .register or rhs_mcv == .register);
+ tag: Air.Inst.Tag,
+ lhs: MCValue,
+ rhs: MCValue,
+ lhs_ty: Type,
+ rhs_ty: Type,
+ metadata: ?BinOpMetadata,
+) InnerError!MCValue {
+ switch (tag) {
+ .add,
+ .sub,
+ .cmp_eq,
+ => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ // An immediate lhs is only allowed when the operation is
+ // commutative, i.e. when the two operands may be swapped
+ const lhs_immediate_ok = switch (tag) {
+ .add => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null,
+ .sub,
+ .cmp_eq,
+ => false,
+ else => unreachable,
+ };
+ const rhs_immediate_ok = switch (tag) {
+ .add,
+ .sub,
+ .cmp_eq,
+ => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null,
+ else => unreachable,
+ };
- const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register;
- const op2 = if (swap_lhs_and_rhs) lhs_mcv else rhs_mcv;
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .add => .add,
+ .sub => .sub,
+ .cmp_eq => .cmp,
+ else => unreachable,
+ };
- const operand = switch (op2) {
- .none => unreachable,
- .undef => unreachable,
- .dead, .unreach => unreachable,
- .compare_flags_unsigned => unreachable,
- .compare_flags_signed => unreachable,
- .ptr_stack_offset => unreachable,
- .ptr_embedded_in_code => unreachable,
- .immediate => |imm| Instruction.Operand.fromU32(@intCast(u32, imm)).?,
- .register => |reg| Instruction.Operand.reg(reg, Instruction.Operand.Shift.none),
- .stack_offset,
- .stack_argument_offset,
- .embedded_in_code,
- .memory,
- => unreachable,
- };
+ if (rhs_immediate_ok) {
+ return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
+ } else if (lhs_immediate_ok) {
+ // swap lhs and rhs
+ return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
+ } else {
+ return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
+ }
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
+ },
+ .mul => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ // TODO add optimisations for multiplication
+ // with immediates, for example a * 2 can be
+ // lowered to a << 1
+ return try self.binOpRegister(.mul, lhs, rhs, lhs_ty, rhs_ty, metadata);
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
+ },
+ .addwrap,
+ .subwrap,
+ .mulwrap,
+ => {
+ const base_tag: Air.Inst.Tag = switch (tag) {
+ .addwrap => .add,
+ .subwrap => .sub,
+ .mulwrap => .mul,
+ else => unreachable,
+ };
- switch (op) {
- .add,
- .bool_and,
+ // Generate an add/sub/mul
+ const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
+
+ // Truncate if necessary
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const result_reg = result.register;
+
+ if (int_info.bits < 32) {
+ try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
+ return result;
+ } else return result;
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
+ },
.bit_and,
- .bool_or,
.bit_or,
- .not,
.xor,
=> {
- const tag: Mir.Inst.Tag = switch (op) {
- .add => .add,
- .bool_and, .bit_and => .@"and",
- .bool_or, .bit_or => .orr,
- .not, .xor => .eor,
- else => unreachable,
- };
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const mod = self.bin_file.options.module.?;
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const lhs_immediate_ok = lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null;
+ const rhs_immediate_ok = rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null;
+
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .bit_and => .@"and",
+ .bit_or => .orr,
+ .xor => .eor,
+ else => unreachable,
+ };
- _ = try self.addInst(.{
- .tag = tag,
- .data = .{ .rr_op = .{
- .rd = dst_reg,
- .rn = op1,
- .op = operand,
- } },
- });
+ if (rhs_immediate_ok) {
+ return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
+ } else if (lhs_immediate_ok) {
+ // swap lhs and rhs
+ return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
+ } else {
+ return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
+ }
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
},
- .sub => {
- const tag: Mir.Inst.Tag = if (swap_lhs_and_rhs) .rsb else .sub;
+ .shl_exact,
+ .shr_exact,
+ => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const rhs_immediate_ok = rhs == .immediate;
+
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .shl_exact => .lsl,
+ .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) {
+ .signed => Mir.Inst.Tag.asr,
+ .unsigned => Mir.Inst.Tag.lsr,
+ },
+ else => unreachable,
+ };
- _ = try self.addInst(.{
- .tag = tag,
- .data = .{ .rr_op = .{
- .rd = dst_reg,
- .rn = op1,
- .op = operand,
- } },
- });
- },
- .cmp_eq => {
- _ = try self.addInst(.{
- .tag = .cmp,
- .data = .{ .rr_op = .{
- .rd = .r0,
- .rn = op1,
- .op = operand,
- } },
- });
+ if (rhs_immediate_ok) {
+ return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
+ } else {
+ return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
+ }
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
+ }
},
- .shl, .shr => {
- assert(!swap_lhs_and_rhs);
- const shift_amount = switch (operand) {
- .Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
- .Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
+ .shl,
+ .shr,
+ => {
+ const base_tag: Air.Inst.Tag = switch (tag) {
+ .shl => .shl_exact,
+ .shr => .shr_exact,
+ else => unreachable,
};
- const tag: Mir.Inst.Tag = switch (op) {
- .shl => .lsl,
- .shr => switch (signedness) {
- .signed => Mir.Inst.Tag.asr,
- .unsigned => Mir.Inst.Tag.lsr,
+ // Generate a shl_exact/shr_exact
+ const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
+
+ // Truncate if necessary
+ switch (tag) {
+ .shr => return result,
+ .shl => switch (lhs_ty.zigTypeTag()) {
+ .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .Int => {
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
+ const result_reg = result.register;
+
+ if (int_info.bits < 32) {
+ try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
+ return result;
+ } else return result;
+ } else {
+ return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+ }
+ },
+ else => unreachable,
},
else => unreachable,
- };
+ }
+ },
+ .bool_and,
+ .bool_or,
+ => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Bool => {
+ const lhs_immediate_ok = lhs == .immediate;
+ const rhs_immediate_ok = rhs == .immediate;
+
+ const mir_tag: Mir.Inst.Tag = switch (tag) {
+ .bool_and => .@"and",
+ .bool_or => .orr,
+ else => unreachable,
+ };
- _ = try self.addInst(.{
- .tag = tag,
- .data = .{ .rr_shift = .{
- .rd = dst_reg,
- .rm = op1,
- .shift_amount = shift_amount,
- } },
- });
+ if (rhs_immediate_ok) {
+ return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
+ } else if (lhs_immediate_ok) {
+ // swap lhs and rhs
+ return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
+ } else {
+ return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
+ }
+ },
+ else => unreachable,
+ }
},
- else => unreachable, // not a binary instruction
+ .ptr_add,
+ .ptr_sub,
+ => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Pointer => {
+ const ptr_ty = lhs_ty;
+ const elem_ty = switch (ptr_ty.ptrSize()) {
+ .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
+ else => ptr_ty.childType(),
+ };
+ const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+
+ if (elem_size == 1) {
+ const base_tag: Mir.Inst.Tag = switch (tag) {
+ .ptr_add => .add,
+ .ptr_sub => .sub,
+ else => unreachable,
+ };
+
+ return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
+ } else {
+ // convert the element offset into a byte offset by
+ // multiplying it by elem_size
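+ // e.g. for a pointer to u32 an index of 3 becomes a byte offset of 12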
+ const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
+ const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
+ return addr;
+ }
+ },
+ else => unreachable,
+ }
+ },
+ else => unreachable,
}
}
-fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue {
- const lhs = try self.resolveInst(op_lhs);
- const rhs = try self.resolveInst(op_rhs);
-
- const lhs_is_register = lhs == .register;
- const rhs_is_register = rhs == .register;
- const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs);
- const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs);
-
- if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
- defer if (lhs_is_register) self.register_manager.unfreezeRegs(&.{lhs.register});
- if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
- defer if (rhs_is_register) self.register_manager.unfreezeRegs(&.{rhs.register});
-
- // Destination must be a register
- // LHS must be a register
- // RHS must be a register
- var dst_mcv: MCValue = undefined;
- var lhs_mcv: MCValue = lhs;
- var rhs_mcv: MCValue = rhs;
-
- // Allocate registers for operands and/or destination
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- if (reuse_lhs) {
- // Allocate 0 or 1 registers
- if (!rhs_is_register) {
- rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?) };
- branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
- }
- dst_mcv = lhs;
- } else if (reuse_rhs) {
- // Allocate 0 or 1 registers
- if (!lhs_is_register) {
- lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?) };
- branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv);
- }
- dst_mcv = rhs;
- } else {
- // Allocate 1 or 2 registers
- if (lhs_is_register and rhs_is_register) {
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
- } else if (lhs_is_register) {
- // Move RHS to register
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
- rhs_mcv = dst_mcv;
- } else if (rhs_is_register) {
- // Move LHS to register
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst) };
- lhs_mcv = dst_mcv;
- } else {
- // Move LHS and RHS to register
- const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? });
- lhs_mcv = MCValue{ .register = regs[0] };
- rhs_mcv = MCValue{ .register = regs[1] };
- dst_mcv = lhs_mcv;
+fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void {
+ const abi_size = ty.abiSize(self.target.*);
- branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
- }
- }
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb,
+ 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh,
+ 3, 4 => .ldr,
+ else => unreachable,
+ };
- // Move the operands to the newly allocated registers
- if (!lhs_is_register) {
- try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs);
- }
- if (!rhs_is_register) {
- try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs);
- }
+ const rr_offset: Mir.Inst.Data = .{ .rr_offset = .{
+ .rt = dest_reg,
+ .rn = addr_reg,
+ .offset = .{ .offset = Instruction.Offset.none },
+ } };
+ const rr_extra_offset: Mir.Inst.Data = .{ .rr_extra_offset = .{
+ .rt = dest_reg,
+ .rn = addr_reg,
+ .offset = .{ .offset = Instruction.ExtraLoadStoreOffset.none },
+ } };
+
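+    // ldrb and ldr use the regular load/store encoding (rr_offset), whereas
+    // ldrh, ldrsb, and ldrsh use the "extra" load/store encoding
+    // (rr_extra_offset) with its own offset format.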
+ const data: Mir.Inst.Data = switch (abi_size) {
+ 1 => if (ty.isSignedInt()) rr_extra_offset else rr_offset,
+ 2 => rr_extra_offset,
+ 3, 4 => rr_offset,
+ else => unreachable,
+ };
_ = try self.addInst(.{
- .tag = .mul,
- .data = .{ .rrr = .{
- .rd = dst_mcv.register,
- .rn = lhs_mcv.register,
- .rm = rhs_mcv.register,
- } },
+ .tag = tag,
+ .data = data,
});
- return dst_mcv;
}
-fn genArmMulConstant(self: *Self, inst: Air.Inst.Index, op: Air.Inst.Ref, op_index: Liveness.OperandInt, imm: u32) !MCValue {
- const lhs = try self.resolveInst(op);
- const rhs = MCValue{ .immediate = imm };
+fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void {
+ const abi_size = ty.abiSize(self.target.*);
- const lhs_is_register = lhs == .register;
- const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op, op_index, lhs);
-
- if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
- defer if (lhs_is_register) self.register_manager.unfreezeRegs(&.{lhs.register});
-
- // Destination must be a register
- // LHS must be a register
- // RHS must be a register
- var dst_mcv: MCValue = undefined;
- var lhs_mcv: MCValue = lhs;
- var rhs_mcv: MCValue = rhs;
-
- // Allocate registers for operands and/or destination
- if (reuse_lhs) {
- // Allocate 1 register
- rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(null) };
- dst_mcv = lhs;
- } else {
- // Allocate 1 or 2 registers
- if (lhs_is_register) {
- // Move RHS to register
- dst_mcv = MCValue{ .register = try self.register_manager.allocReg(null) };
- rhs_mcv = dst_mcv;
- } else {
- // Move LHS and RHS to register
- const regs = try self.register_manager.allocRegs(2, .{ null, null });
- lhs_mcv = MCValue{ .register = regs[0] };
- rhs_mcv = MCValue{ .register = regs[1] };
- dst_mcv = lhs_mcv;
- }
- }
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => .strb,
+ 2 => .strh,
+ 3, 4 => .str,
+ else => unreachable,
+ };
- // Move the operands to the newly allocated registers
- if (!lhs_is_register) {
- try self.genSetReg(self.air.typeOf(op), lhs_mcv.register, lhs);
- }
- try self.genSetReg(Type.initTag(.usize), rhs_mcv.register, rhs);
+ const rr_offset: Mir.Inst.Data = .{ .rr_offset = .{
+ .rt = source_reg,
+ .rn = addr_reg,
+ .offset = .{ .offset = Instruction.Offset.none },
+ } };
+ const rr_extra_offset: Mir.Inst.Data = .{ .rr_extra_offset = .{
+ .rt = source_reg,
+ .rn = addr_reg,
+ .offset = .{ .offset = Instruction.ExtraLoadStoreOffset.none },
+ } };
+
+ const data: Mir.Inst.Data = switch (abi_size) {
+ 1, 3, 4 => rr_offset,
+ 2 => rr_extra_offset,
+ else => unreachable,
+ };
_ = try self.addInst(.{
- .tag = .mul,
- .data = .{ .rrr = .{
- .rd = dst_mcv.register,
- .rn = lhs_mcv.register,
- .rm = rhs_mcv.register,
- } },
+ .tag = tag,
+ .data = data,
});
- return dst_mcv;
}
-fn genArmInlineMemcpy(
+fn genInlineMemcpy(
self: *Self,
src: Register,
dst: Register,
@@ -2132,7 +3183,7 @@ fn genArmInlineMemcpy(
.data = .{ .rr_offset = .{
.rt = tmp,
.rn = src,
- .offset = .{ .offset = Instruction.Offset.reg(count, 0) },
+ .offset = .{ .offset = Instruction.Offset.reg(count, .none) },
} },
});
@@ -2142,7 +3193,7 @@ fn genArmInlineMemcpy(
.data = .{ .rr_offset = .{
.rt = tmp,
.rn = dst,
- .offset = .{ .offset = Instruction.Offset.reg(count, 0) },
+ .offset = .{ .offset = Instruction.Offset.reg(count, .none) },
} },
});
@@ -2165,6 +3216,92 @@ fn genArmInlineMemcpy(
// end:
}
+/// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
+/// after codegen for this symbol is done.
+fn addDbgInfoTypeReloc(self: *Self, ty: Type) error{OutOfMemory}!void {
+ switch (self.debug_output) {
+ .dwarf => |dw| {
+ assert(ty.hasRuntimeBits());
+ const dbg_info = &dw.dbg_info;
+ const index = dbg_info.items.len;
+ try dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
+ const mod = self.bin_file.options.module.?;
+ const atom = switch (self.bin_file.tag) {
+ .elf => &mod.declPtr(self.mod_fn.owner_decl).link.elf.dbg_info_atom,
+ .macho => unreachable,
+ else => unreachable,
+ };
+ try dw.addTypeRelocGlobal(atom, ty, @intCast(u32, index));
+ },
+ .plan9 => {},
+ .none => {},
+ }
+}
+
+fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, arg_index: u32, stack_byte_count: u32) error{OutOfMemory}!void {
+ const prologue_stack_space = stack_byte_count + self.saved_regs_stack_space;
+
+ const mcv = self.args[arg_index];
+ const ty = self.air.instructions.items(.data)[inst].ty;
+ const name = self.mod_fn.getParamName(arg_index);
+ const name_with_null = name.ptr[0 .. name.len + 1];
+
+ switch (mcv) {
+ .register => |reg| {
+ switch (self.debug_output) {
+ .dwarf => |dw| {
+ const dbg_info = &dw.dbg_info;
+ try dbg_info.ensureUnusedCapacity(3);
+ dbg_info.appendAssumeCapacity(@enumToInt(link.File.Dwarf.AbbrevKind.parameter));
+ dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
+ 1, // ULEB128 dwarf expression length
+ reg.dwarfLocOp(),
+ });
+ try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
+ try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
+ dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
+ },
+ .plan9 => {},
+ .none => {},
+ }
+ },
+ .stack_offset,
+ .stack_argument_offset,
+ => {
+ switch (self.debug_output) {
+ .dwarf => |dw| {
+ // const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const adjusted_stack_offset = switch (mcv) {
+ .stack_offset => |offset| -@intCast(i32, offset),
+ .stack_argument_offset => |offset| @intCast(i32, prologue_stack_space - offset),
+ else => unreachable,
+ };
+
+ const dbg_info = &dw.dbg_info;
+ try dbg_info.append(@enumToInt(link.File.Dwarf.AbbrevKind.parameter));
+
+ // Get length of the LEB128 stack offset
+ var counting_writer = std.io.countingWriter(std.io.null_writer);
+ leb128.writeILEB128(counting_writer.writer(), adjusted_stack_offset) catch unreachable;
+
+ // DW.AT.location, DW.FORM.exprloc
+ // ULEB128 dwarf expression length
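+            // The expression is a single DW.OP.breg11 (fp-relative) opcode
+            // followed by the SLEB128 offset, hence bytes_written + 1.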
+ try leb128.writeULEB128(dbg_info.writer(), counting_writer.bytes_written + 1);
+ try dbg_info.append(DW.OP.breg11);
+ try leb128.writeILEB128(dbg_info.writer(), adjusted_stack_offset);
+
+ try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
+ try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
+ dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
+ },
+ .plan9 => {},
+ .none => {},
+ }
+ },
+ else => unreachable, // not a possible argument
+ }
+}
+
fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const arg_index = self.arg_index;
self.arg_index += 1;
@@ -2175,9 +3312,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const mcv = switch (result) {
// Copy registers to the stack
.register => |reg| blk: {
- const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch {
- return self.fail("type '{}' too big to fit into stack frame", .{ty});
- };
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
const abi_align = ty.abiAlignment(self.target.*);
const stack_offset = try self.allocMem(inst, abi_size, abi_align);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -2187,13 +3322,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
else => result,
};
- _ = try self.addInst(.{
- .tag = .dbg_arg,
- .cond = undefined,
- .data = .{ .dbg_arg_info = .{
- .air_inst = inst,
- .arg_index = arg_index,
- } },
+ try self.dbg_arg_relocs.append(self.gpa, .{
+ .inst = inst,
+ .index = arg_index,
});
if (self.liveness.isUnused(inst))
@@ -2217,8 +3348,14 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
-fn airRetAddr(self: *Self) !void {
- return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch});
+fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for arm", .{});
+ return self.finishAir(inst, result, .{ .none, .none, .none });
+}
+
+fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for arm", .{});
+ return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airFence(self: *Self) !void {
@@ -2226,11 +3363,12 @@ fn airFence(self: *Self) !void {
//return self.finishAirBookkeeping();
}
-fn airCall(self: *Self, inst: Air.Inst.Index) !void {
+fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.Modifier) !void {
+ if (modifier == .always_tail) return self.fail("TODO implement tail calls for arm", .{});
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const ty = self.air.typeOf(callee);
const fn_ty = switch (ty.zigTypeTag()) {
@@ -2246,108 +3384,125 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
// Architecture, compare flags are not preserved across
// calls. Therefore, if some value is currently stored there, we
// need to save it.
- //
- // TODO once caller-saved registers are implemented, save them
- // here too, but crucially *after* we save the compare flags as
- // saving compare flags may require a new caller-saved register
try self.spillCompareFlagsIfOccupied();
+ // Save caller-saved registers, but crucially *after* we save the
+ // compare flags as saving compare flags may require a new
+ // caller-saved register
+ for (caller_preserved_regs) |reg| {
+ try self.register_manager.getReg(reg, null);
+ }
+
+ if (info.return_value == .stack_offset) {
+ log.debug("airCall: return by reference", .{});
+ const ret_ty = fn_ty.fnReturnType();
+ const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+ const stack_offset = try self.allocMem(inst, ret_abi_size, ret_abi_align);
+
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = ret_ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ try self.register_manager.getReg(.r0, null);
+ try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset });
+
+ info.return_value = .{ .stack_offset = stack_offset };
+ }
+
// Make space for the arguments passed via the stack
self.max_end_stack += info.stack_byte_count;
+ for (info.args) |mc_arg, arg_i| {
+ const arg = args[arg_i];
+ const arg_ty = self.air.typeOf(arg);
+ const arg_mcv = try self.resolveInst(args[arg_i]);
+
+ switch (mc_arg) {
+ .none => continue,
+ .register => |reg| {
+ try self.register_manager.getReg(reg, null);
+ try self.genSetReg(arg_ty, reg, arg_mcv);
+ },
+ .stack_offset => unreachable,
+ .stack_argument_offset => |offset| try self.genSetStackArgument(
+ arg_ty,
+ info.stack_byte_count - offset,
+ arg_mcv,
+ ),
+ else => unreachable,
+ }
+ }
+
// Due to incremental compilation, how function calls are generated depends
// on linking.
- if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
- for (info.args) |mc_arg, arg_i| {
- const arg = args[arg_i];
- const arg_ty = self.air.typeOf(arg);
- const arg_mcv = try self.resolveInst(args[arg_i]);
-
- switch (mc_arg) {
- .none => continue,
- .undef => unreachable,
- .immediate => unreachable,
- .unreach => unreachable,
- .dead => unreachable,
- .embedded_in_code => unreachable,
- .memory => unreachable,
- .compare_flags_signed => unreachable,
- .compare_flags_unsigned => unreachable,
- .ptr_stack_offset => unreachable,
- .ptr_embedded_in_code => unreachable,
- .register => |reg| {
- try self.register_manager.getReg(reg, null);
- try self.genSetReg(arg_ty, reg, arg_mcv);
- },
- .stack_offset => unreachable,
- .stack_argument_offset => |offset| try self.genSetStackArgument(
- arg_ty,
- info.stack_byte_count - offset,
- arg_mcv,
- ),
+ switch (self.bin_file.tag) {
+ .elf, .coff => {
+ if (self.air.value(callee)) |func_value| {
+ if (func_value.castTag(.function)) |func_payload| {
+ const func = func_payload.data;
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+ const mod = self.bin_file.options.module.?;
+ const fn_owner_decl = mod.declPtr(func.owner_decl);
+ const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
+ const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
+ coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
+ else
+ unreachable;
+
+ try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
+ } else if (func_value.castTag(.extern_fn)) |_| {
+ return self.fail("TODO implement calling extern functions", .{});
+ } else {
+ return self.fail("TODO implement calling bitcasted functions", .{});
+ }
+ } else {
+ assert(ty.zigTypeTag() == .Pointer);
+ const mcv = try self.resolveInst(callee);
+
+ try self.genSetReg(Type.initTag(.usize), .lr, mcv);
}
- }
- if (self.air.value(callee)) |func_value| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
- coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
- else
- unreachable;
-
- try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
- } else if (func_value.castTag(.extern_fn)) |_| {
- return self.fail("TODO implement calling extern functions", .{});
+ // TODO: add Instruction.supportedOn
+ // function for ARM
+ if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
+ _ = try self.addInst(.{
+ .tag = .blx,
+ .data = .{ .reg = .lr },
+ });
} else {
- return self.fail("TODO implement calling bitcasted functions", .{});
+ return self.fail("TODO fix blx emulation for ARM <v5", .{});
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .data = .{ .rr_op = .{
+ // .rd = .lr,
+ // .rn = .r0,
+ // .op = Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none),
+ // } },
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .bx,
+ // .data = .{ .reg = .lr },
+ // });
}
- } else {
- assert(ty.zigTypeTag() == .Pointer);
- const mcv = try self.resolveInst(callee);
-
- try self.genSetReg(Type.initTag(.usize), .lr, mcv);
- }
-
- // TODO: add Instruction.supportedOn
- // function for ARM
- if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
- _ = try self.addInst(.{
- .tag = .blx,
- .data = .{ .reg = .lr },
- });
- } else {
- return self.fail("TODO fix blx emulation for ARM <v5", .{});
- // _ = try self.addInst(.{
- // .tag = .mov,
- // .data = .{ .rr_op = .{
- // .rd = .lr,
- // .rn = .r0,
- // .op = Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none),
- // } },
- // });
- // _ = try self.addInst(.{
- // .tag = .bx,
- // .data = .{ .reg = .lr },
- // });
- }
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable; // unsupported architecture for MachO
- } else if (self.bin_file.cast(link.File.Plan9)) |_| {
- return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch});
- } else unreachable;
+ },
+ .macho => unreachable, // unsupported architecture for MachO
+ .plan9 => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}),
+ else => unreachable,
+ }
const result: MCValue = result: {
switch (info.return_value) {
.register => |reg| {
- if (Register.allocIndex(reg) == null) {
- // Save function return value in a callee saved register
- break :result try self.copyToNewRegister(inst, info.return_value);
+ if (RegisterManager.indexOfRegIntoTracked(reg) == null) {
+ // Save function return value into a tracked register
+ log.debug("airCall: copying {} as it is not tracked", .{reg});
+ const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(), info.return_value);
+ break :result MCValue{ .register = new_reg };
}
},
else => {},
@@ -2369,107 +3524,189 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
return bt.finishAir(result);
}
-fn ret(self: *Self, mcv: MCValue) !void {
+fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
const ret_ty = self.fn_type.fnReturnType();
- try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
+
+ switch (self.ret_mcv) {
+ .none => {},
+ .immediate => {
+ assert(ret_ty.isError());
+ },
+ .register => |reg| {
+ // Return result by value
+ try self.genSetReg(ret_ty, reg, operand);
+ },
+ .stack_offset => {
+ // Return result by reference
+ //
+            // self.ret_mcv is the address where this function
+            // should store its result
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = ret_ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
+ },
+ else => unreachable, // invalid return result
+ }
// Just add space for an instruction, patch this later
try self.exitlude_jump_relocs.append(self.gpa, try self.addNop());
-}
-fn airRet(self: *Self, inst: Air.Inst.Index) !void {
- const un_op = self.air.instructions.items(.data)[inst].un_op;
- const operand = try self.resolveInst(un_op);
- try self.ret(operand);
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr = try self.resolveInst(un_op);
- _ = ptr;
- return self.fail("TODO implement airRetLoad for {}", .{self.target.cpu.arch});
- //return self.finishAir(inst, .dead, .{ un_op, .none, .none });
+ const ptr_ty = self.air.typeOf(un_op);
+ const ret_ty = self.fn_type.fnReturnType();
+
+ switch (self.ret_mcv) {
+ .none => {},
+ .register => {
+ // Return result by value
+ try self.load(self.ret_mcv, ptr, ptr_ty);
+ },
+ .stack_offset => {
+ // Return result by reference
+ //
+            // self.ret_mcv is the address where this function
+            // should store its result
+ //
+ // If the operand is a ret_ptr instruction, we are done
+ // here. Else we need to load the result from the location
+ // pointed to by the operand and store it to the result
+ // location.
+ const op_inst = Air.refToIndex(un_op).?;
+ if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
+ const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const abi_align = ret_ty.abiAlignment(self.target.*);
+
+ // This is essentially allocMem without the
+ // instruction tracking
+ if (abi_align > self.stack_align)
+ self.stack_align = abi_align;
+ // TODO find a free slot instead of always appending
+ const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
+ self.next_stack_offset = offset;
+ self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+
+ const tmp_mcv = MCValue{ .stack_offset = offset };
+ try self.load(tmp_mcv, ptr, ptr_ty);
+ try self.store(self.ret_mcv, tmp_mcv, ptr_ty, ret_ty);
+ }
+ },
+ else => unreachable, // invalid return result
+ }
+
+ // Just add space for an instruction, patch this later
+ try self.exitlude_jump_relocs.append(self.gpa, try self.addNop());
+
+ return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
+ const lhs_ty = self.air.typeOf(bin_op.lhs);
+
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
+ const operands: BinOpOperands = .{ .inst = .{
+ .inst = inst,
+ .lhs = bin_op.lhs,
+ .rhs = bin_op.rhs,
+ } };
+ break :blk try self.cmp(operands, lhs_ty, op);
+ };
- if (lhs_ty.abiSize(self.target.*) > 4) {
- return self.fail("TODO cmp for types with size > 4", .{});
- }
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+}
- const signedness: std.builtin.Signedness = blk: {
- // by default we tell the operand type is unsigned (i.e. bools and enum values)
- if (lhs_ty.zigTypeTag() != .Int) break :blk .unsigned;
+const BinOpOperands = union(enum) {
+ inst: struct {
+ inst: Air.Inst.Index,
+ lhs: Air.Inst.Ref,
+ rhs: Air.Inst.Ref,
+ },
+ mcv: struct {
+ lhs: MCValue,
+ rhs: MCValue,
+ },
+};
- // incase of an actual integer, we emit the correct signedness
- break :blk lhs_ty.intInfo(self.target.*).signedness;
- };
+fn cmp(
+ self: *Self,
+ operands: BinOpOperands,
+ lhs_ty: Type,
+ op: math.CompareOperator,
+) !MCValue {
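+    // Map the operand type to the integer type the comparison is lowered to:
+    // bools and optionals with no payload bits compare as u1, pointers and
+    // pointer-like optionals as usize, error sets as u16, and enums as their
+    // integer tag type.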
+ var int_buffer: Type.Payload.Bits = undefined;
+ const int_ty = switch (lhs_ty.zigTypeTag()) {
+ .Optional => blk: {
+ var opt_buffer: Type.Payload.ElemType = undefined;
+ const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ break :blk Type.initTag(.u1);
+ } else if (lhs_ty.isPtrLikeOptional()) {
+ break :blk Type.usize;
+ } else {
+ return self.fail("TODO ARM cmp non-pointer optionals", .{});
+ }
+ },
+ .Float => return self.fail("TODO ARM cmp floats", .{}),
+ .Enum => lhs_ty.intTagType(&int_buffer),
+ .Int => lhs_ty,
+ .Bool => Type.initTag(.u1),
+ .Pointer => Type.usize,
+ .ErrorSet => Type.initTag(.u16),
+ else => unreachable,
+ };
+ const int_info = int_ty.intInfo(self.target.*);
+ if (int_info.bits <= 32) {
try self.spillCompareFlagsIfOccupied();
- self.compare_flags_inst = inst;
-
- const lhs_is_register = lhs == .register;
- const rhs_is_register = rhs == .register;
- // lhs should always be a register
- const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs);
-
- if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
- defer if (lhs_is_register) self.register_manager.unfreezeRegs(&.{lhs.register});
- if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
- defer if (rhs_is_register) self.register_manager.unfreezeRegs(&.{rhs.register});
-
- var lhs_mcv = lhs;
- var rhs_mcv = rhs;
-
- // Allocate registers
- if (rhs_should_be_register) {
- if (!lhs_is_register and !rhs_is_register) {
- const regs = try self.register_manager.allocRegs(2, .{
- Air.refToIndex(bin_op.lhs).?, Air.refToIndex(bin_op.rhs).?,
- });
- lhs_mcv = MCValue{ .register = regs[0] };
- rhs_mcv = MCValue{ .register = regs[1] };
- } else if (!rhs_is_register) {
- const track_inst = if (self.liveness.operandDies(inst, 1)) null else Air.refToIndex(bin_op.rhs).?;
- rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(track_inst) };
- } else if (!lhs_is_register) {
- const track_inst = if (self.liveness.operandDies(inst, 0)) null else Air.refToIndex(bin_op.lhs).?;
- lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(track_inst) };
- }
- } else {
- if (!lhs_is_register) {
- const track_inst = if (self.liveness.operandDies(inst, 0)) null else Air.refToIndex(bin_op.lhs).?;
- lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(track_inst) };
- }
- }
- // Move the operands to the newly allocated registers
- const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
- if (lhs_mcv == .register and !lhs_is_register) {
- try self.genSetReg(lhs_ty, lhs_mcv.register, lhs);
- branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.lhs).?, lhs);
- }
- if (rhs_mcv == .register and !rhs_is_register) {
- try self.genSetReg(lhs_ty, rhs_mcv.register, rhs);
- branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.rhs).?, rhs);
- }
+ switch (operands) {
+ .inst => |inst_op| {
+ const metadata: BinOpMetadata = .{
+ .inst = inst_op.inst,
+ .lhs = inst_op.lhs,
+ .rhs = inst_op.rhs,
+ };
+ const lhs = try self.resolveInst(inst_op.lhs);
+ const rhs = try self.resolveInst(inst_op.rhs);
- // The destination register is not present in the cmp instruction
- // The signedness of the integer does not matter for the cmp instruction
- try self.genArmBinOpCode(undefined, lhs_mcv, rhs_mcv, false, .cmp_eq, undefined);
+ self.cpsr_flags_inst = inst_op.inst;
+ _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, metadata);
+ },
+ .mcv => |mcv_op| {
+ _ = try self.binOp(.cmp_eq, mcv_op.lhs, mcv_op.rhs, int_ty, int_ty, null);
+ },
+ }
- break :result switch (signedness) {
- .signed => MCValue{ .compare_flags_signed = op },
- .unsigned => MCValue{ .compare_flags_unsigned = op },
+ return switch (int_info.signedness) {
+ .signed => MCValue{ .cpsr_flags = Condition.fromCompareOperatorSigned(op) },
+ .unsigned => MCValue{ .cpsr_flags = Condition.fromCompareOperatorUnsigned(op) },
};
- };
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ } else {
+ return self.fail("TODO ARM cmp for ints > 32 bits", .{});
+ }
+}
+
+fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch});
+}
+
+fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
+ _ = operand;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airCmpLtErrorsLen for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
@@ -2487,73 +3724,73 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAirBookkeeping();
}
-fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
- const pl_op = self.air.instructions.items(.data)[inst].pl_op;
- const cond = try self.resolveInst(pl_op.operand);
- const extra = self.air.extraData(Air.CondBr, pl_op.payload);
- const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
- const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- const liveness_condbr = self.liveness.getCondBr(inst);
+fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+ // TODO emit debug info for function change
+ _ = function;
+ return self.finishAir(inst, .dead, .{ .none, .none, .none });
+}
- const reloc: Mir.Inst.Index = reloc: {
- const condition: Condition = switch (cond) {
- .compare_flags_signed => |cmp_op| blk: {
- // Here we map to the opposite condition because the jump is to the false branch.
- const condition = Condition.fromCompareOperatorSigned(cmp_op);
- break :blk condition.negate();
- },
- .compare_flags_unsigned => |cmp_op| blk: {
- // Here we map to the opposite condition because the jump is to the false branch.
- const condition = Condition.fromCompareOperatorUnsigned(cmp_op);
- break :blk condition.negate();
- },
- .register => |reg| blk: {
- try self.spillCompareFlagsIfOccupied();
+fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void {
+ // TODO emit debug info lexical block
+ return self.finishAir(inst, .dead, .{ .none, .none, .none });
+}
- // cmp reg, 1
- // bne ...
- _ = try self.addInst(.{
- .tag = .cmp,
- .cond = .al,
- .data = .{ .rr_op = .{
- .rd = .r0,
- .rn = reg,
- .op = Instruction.Operand.imm(1, 0),
- } },
- });
+fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const name = self.air.nullTerminatedString(pl_op.payload);
+ const operand = pl_op.operand;
+ // TODO emit debug info for this variable
+ _ = name;
+ return self.finishAir(inst, .dead, .{ operand, .none, .none });
+}
+
+/// Given a boolean condition, emit a jump that is taken when that
+/// condition is false.
+fn condBr(self: *Self, condition: MCValue) !Mir.Inst.Index {
+ const condition_code: Condition = switch (condition) {
+ .cpsr_flags => |cond| cond.negate(),
+ else => blk: {
+ const reg = switch (condition) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(Type.bool, condition),
+ };
- break :blk .ne;
- },
- .stack_offset,
- .memory,
- .stack_argument_offset,
- => blk: {
- try self.spillCompareFlagsIfOccupied();
+ try self.spillCompareFlagsIfOccupied();
- const reg = try self.copyToTmpRegister(Type.initTag(.bool), cond);
+ // cmp reg, 1
+ // bne ...
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .cond = .al,
+ .data = .{ .rr_op = .{
+ .rd = .r0,
+ .rn = reg,
+ .op = Instruction.Operand.imm(1, 0),
+ } },
+ });
- // cmp reg, 1
- // bne ...
- _ = try self.addInst(.{
- .tag = .cmp,
- .data = .{ .rr_op = .{
- .rd = .r0,
- .rn = reg,
- .op = Instruction.Operand.imm(1, 0),
- } },
- });
+ break :blk .ne;
+ },
+ };
- break :blk .ne;
- },
- else => return self.fail("TODO implement condbr {} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }),
- };
+ return try self.addInst(.{
+ .tag = .b,
+ .cond = condition_code,
+ .data = .{ .inst = undefined }, // populated later through performReloc
+ });
+}
- break :reloc try self.addInst(.{
- .tag = .b,
- .cond = condition,
- .data = .{ .inst = undefined }, // populated later through performReloc
- });
- };
+fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const cond_inst = try self.resolveInst(pl_op.operand);
+ const extra = self.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
+ const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
+ const liveness_condbr = self.liveness.getCondBr(inst);
+
+ const reloc: Mir.Inst.Index = try self.condBr(cond_inst);
// If the condition dies here in this condbr instruction, process
// that death now instead of later as this has an effect on
@@ -2572,9 +3809,12 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
var parent_stack = try self.stack.clone(self.gpa);
defer parent_stack.deinit(self.gpa);
const parent_registers = self.register_manager.registers;
- const parent_compare_flags_inst = self.compare_flags_inst;
+ const parent_cpsr_flags_inst = self.cpsr_flags_inst;
try self.branch_stack.append(.{});
+ errdefer {
+ _ = self.branch_stack.pop();
+ }
try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
for (liveness_condbr.then_deaths) |operand| {
@@ -2588,7 +3828,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
defer saved_then_branch.deinit(self.gpa);
self.register_manager.registers = parent_registers;
- self.compare_flags_inst = parent_compare_flags_inst;
+ self.cpsr_flags_inst = parent_cpsr_flags_inst;
self.stack.deinit(self.gpa);
self.stack = parent_stack;
@@ -2678,7 +3918,10 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// TODO track the new register / stack allocation
}
- self.branch_stack.pop().deinit(self.gpa);
+ {
+ var item = self.branch_stack.pop();
+ item.deinit(self.gpa);
+ }
// We already took care of pl_op.operand earlier, so we're going
// to pass .none here
@@ -2694,9 +3937,16 @@ fn isNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
else => .{ .register = try self.copyToTmpRegister(ty, operand) },
};
- try self.genArmBinOpCode(undefined, reg_mcv, .{ .immediate = 0 }, false, .cmp_eq, undefined);
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .rr_op = .{
+ .rd = undefined,
+ .rn = reg_mcv.register,
+ .op = Instruction.Operand.fromU32(0).?,
+ } },
+ });
- return MCValue{ .compare_flags_unsigned = .eq };
+ return MCValue{ .cpsr_flags = .eq };
} else {
return self.fail("TODO implement non-pointer optionals", .{});
}
@@ -2704,43 +3954,30 @@ fn isNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
const is_null_result = try self.isNull(ty, operand);
- assert(is_null_result.compare_flags_unsigned == .eq);
+ assert(is_null_result.cpsr_flags == .eq);
- return MCValue{ .compare_flags_unsigned = .neq };
+ return MCValue{ .cpsr_flags = .ne };
}
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
- _ = operand;
-
const error_type = ty.errorUnionSet();
- const payload_type = ty.errorUnionPayload();
+ const error_int_type = Type.initTag(.u16);
- if (!error_type.hasRuntimeBits()) {
+ if (error_type.errorSetIsEmpty()) {
return MCValue{ .immediate = 0 }; // always false
- } else if (!payload_type.hasRuntimeBits()) {
- if (error_type.abiSize(self.target.*) <= 4) {
- const reg_mcv: MCValue = switch (operand) {
- .register => operand,
- else => .{ .register = try self.copyToTmpRegister(error_type, operand) },
- };
-
- try self.genArmBinOpCode(undefined, reg_mcv, .{ .immediate = 0 }, false, .cmp_eq, undefined);
-
- return MCValue{ .compare_flags_unsigned = .gt };
- } else {
- return self.fail("TODO isErr for errors with size > 4", .{});
- }
- } else {
- return self.fail("TODO isErr for non-empty payloads", .{});
}
+
+ const error_mcv = try self.errUnionErr(operand, ty);
+ _ = try self.binOp(.cmp_eq, error_mcv, .{ .immediate = 0 }, error_int_type, error_int_type, null);
+ return MCValue{ .cpsr_flags = .hi };
}
fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
const is_err_result = try self.isErr(ty, operand);
switch (is_err_result) {
- .compare_flags_unsigned => |op| {
- assert(op == .gt);
- return MCValue{ .compare_flags_unsigned = .lte };
+ .cpsr_flags => |cond| {
+ assert(cond == .hi);
+ return MCValue{ .cpsr_flags = cond.negate() };
},
.immediate => |imm| {
assert(imm == 0);
@@ -2754,7 +3991,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
try self.spillCompareFlagsIfOccupied();
- self.compare_flags_inst = inst;
+ self.cpsr_flags_inst = inst;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
@@ -2907,7 +4144,17 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.genBody(body);
- for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc);
+ // relocations for `br` instructions
+ const relocs = &self.blocks.getPtr(inst).?.relocs;
+ if (relocs.items.len > 0 and relocs.items[relocs.items.len - 1] == self.mir_instructions.len - 1) {
+ // If the last Mir instruction is the last relocation (which
+ // would just jump one instruction further), it can be safely
+ // removed
+ self.mir_instructions.orderedRemove(relocs.pop());
+ }
+ for (relocs.items) |reloc| {
+ try self.performReloc(reloc);
+ }
const result = self.blocks.getPtr(inst).?.mcv;
return self.finishAir(inst, result, .{ .none, .none, .none });
@@ -2915,10 +4162,132 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
- const condition = pl_op.operand;
- _ = condition;
- return self.fail("TODO airSwitch for {}", .{self.target.cpu.arch});
- // return self.finishAir(inst, .dead, .{ condition, .none, .none });
+ const condition_ty = self.air.typeOf(pl_op.operand);
+ const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
+ const liveness = try self.liveness.getSwitchBr(
+ self.gpa,
+ inst,
+ switch_br.data.cases_len + 1,
+ );
+ defer self.gpa.free(liveness.deaths);
+
+ // If the condition dies here in this switch instruction, process
+ // that death now instead of later as this has an effect on
+ // whether it needs to be spilled in the branches
+ if (self.liveness.operandDies(inst, 0)) {
+ const op_int = @enumToInt(pl_op.operand);
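+        // Refs below typed_value_map.len are comptime-known constants rather
+        // than instructions, so only larger refs have a death to process.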
+ if (op_int >= Air.Inst.Ref.typed_value_map.len) {
+ const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+ self.processDeath(op_index);
+ }
+ }
+
+ var extra_index: usize = switch_br.end;
+ var case_i: u32 = 0;
+ while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+ const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+ assert(items.len > 0);
+ const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
+ extra_index = case.end + items.len + case_body.len;
+
+ var relocs = try self.gpa.alloc(u32, items.len);
+ defer self.gpa.free(relocs);
+
+ if (items.len == 1) {
+ const condition = try self.resolveInst(pl_op.operand);
+ const item = try self.resolveInst(items[0]);
+
+ const operands: BinOpOperands = .{ .mcv = .{
+ .lhs = condition,
+ .rhs = item,
+ } };
+ const cmp_result = try self.cmp(operands, condition_ty, .eq);
+ relocs[0] = try self.condBr(cmp_result);
+ } else {
+ return self.fail("TODO switch with multiple items", .{});
+ }
+
+ // Capture the state of register and stack allocation state so that we can revert to it.
+ const parent_next_stack_offset = self.next_stack_offset;
+ const parent_free_registers = self.register_manager.free_registers;
+ const parent_cpsr_flags_inst = self.cpsr_flags_inst;
+ var parent_stack = try self.stack.clone(self.gpa);
+ defer parent_stack.deinit(self.gpa);
+ const parent_registers = self.register_manager.registers;
+
+ try self.branch_stack.append(.{});
+ errdefer {
+ _ = self.branch_stack.pop();
+ }
+
+ try self.ensureProcessDeathCapacity(liveness.deaths[case_i].len);
+ for (liveness.deaths[case_i]) |operand| {
+ self.processDeath(operand);
+ }
+ try self.genBody(case_body);
+
+ // Revert to the previous register and stack allocation state.
+ var saved_case_branch = self.branch_stack.pop();
+ defer saved_case_branch.deinit(self.gpa);
+
+ self.register_manager.registers = parent_registers;
+ self.cpsr_flags_inst = parent_cpsr_flags_inst;
+ self.stack.deinit(self.gpa);
+ self.stack = parent_stack;
+ parent_stack = .{};
+
+ self.next_stack_offset = parent_next_stack_offset;
+ self.register_manager.free_registers = parent_free_registers;
+
+ for (relocs) |reloc| {
+ try self.performReloc(reloc);
+ }
+ }
+
+ if (switch_br.data.else_body_len > 0) {
+ const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
+
+ // Capture the state of register and stack allocation state so that we can revert to it.
+ const parent_next_stack_offset = self.next_stack_offset;
+ const parent_free_registers = self.register_manager.free_registers;
+ const parent_cpsr_flags_inst = self.cpsr_flags_inst;
+ var parent_stack = try self.stack.clone(self.gpa);
+ defer parent_stack.deinit(self.gpa);
+ const parent_registers = self.register_manager.registers;
+
+ try self.branch_stack.append(.{});
+ errdefer {
+ _ = self.branch_stack.pop();
+ }
+
+ const else_deaths = liveness.deaths.len - 1;
+ try self.ensureProcessDeathCapacity(liveness.deaths[else_deaths].len);
+ for (liveness.deaths[else_deaths]) |operand| {
+ self.processDeath(operand);
+ }
+ try self.genBody(else_body);
+
+ // Revert to the previous register and stack allocation state.
+ var saved_case_branch = self.branch_stack.pop();
+ defer saved_case_branch.deinit(self.gpa);
+
+ self.register_manager.registers = parent_registers;
+ self.cpsr_flags_inst = parent_cpsr_flags_inst;
+ self.stack.deinit(self.gpa);
+ self.stack = parent_stack;
+ parent_stack = .{};
+
+ self.next_stack_offset = parent_next_stack_offset;
+ self.register_manager.free_registers = parent_free_registers;
+
+ // TODO consolidate returned MCValues between prongs and else branch like we do
+ // in airCondBr.
+ }
+
+ // We already took care of pl_op.operand earlier, so we're going
+ // to pass .none here
+ return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
@@ -2935,17 +4304,6 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ branch.operand, .none, .none });
}
-fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const air_tags = self.air.instructions.items(.tag);
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (air_tags[inst]) {
- .bool_and => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and),
- .bool_or => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or),
- else => unreachable, // Not a boolean operation
- };
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
@@ -2956,7 +4314,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
block_data.mcv = switch (operand_mcv) {
.none, .dead, .unreach => unreachable,
.register, .stack_offset, .memory => operand_mcv,
- .immediate, .stack_argument_offset => blk: {
+ .immediate, .stack_argument_offset, .cpsr_flags => blk: {
const new_mcv = try self.allocRegOrMem(block, true);
try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
break :blk new_mcv;
@@ -2981,40 +4339,43 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
}
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
- const air_datas = self.air.instructions.items(.data);
- const air_extra = self.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload);
- const zir = self.mod_fn.owner_decl.getFileScope().zir;
- const extended = zir.instructions.items(.data)[air_extra.data.zir_index].extended;
- const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
- const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
- const outputs_len = @truncate(u5, extended.small);
- const args_len = @truncate(u5, extended.small >> 5);
- const clobbers_len = @truncate(u5, extended.small >> 10);
- _ = clobbers_len; // TODO honor these
- const is_volatile = @truncate(u1, extended.small >> 15) != 0;
- const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end..][0..outputs_len]);
- const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end + outputs.len ..][0..args_len]);
-
- if (outputs_len > 1) {
- return self.fail("TODO implement codegen for asm with more than 1 output", .{});
- }
- var extra_i: usize = zir_extra.end;
- const output_constraint: ?[]const u8 = out: {
- var i: usize = 0;
- while (i < outputs_len) : (i += 1) {
- const output = zir.extraData(Zir.Inst.Asm.Output, extra_i);
- extra_i = output.end;
- break :out zir.nullTerminatedString(output.data.constraint);
- }
- break :out null;
- };
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Asm, ty_pl.payload);
+ const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
+ const clobbers_len = @truncate(u31, extra.data.flags);
+ var extra_i: usize = extra.end;
+ const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ extra_i += outputs.len;
+ const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
const result: MCValue = if (dead) .dead else result: {
- for (args) |arg| {
- const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
- extra_i = input.end;
- const constraint = zir.nullTerminatedString(input.data.constraint);
+ if (outputs.len > 1) {
+ return self.fail("TODO implement codegen for asm with more than 1 output", .{});
+ }
+
+ const output_constraint: ?[]const u8 = for (outputs) |output| {
+ if (output != .none) {
+ return self.fail("TODO implement codegen for non-expr asm", .{});
+ }
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
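+            // For example, a 4-byte constraint and a 1-byte name occupy
+            // 4 + 1 + 1 + 1 = 7 bytes including both null terminators, i.e.
+            // two u32 words, and the formula gives (4 + 1 + 2 + 3) / 4 = 2.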
+
+ break constraint;
+ } else null;
+
+ for (inputs) |input| {
+ const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const constraint = std.mem.sliceTo(input_bytes, 0);
+ const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
@@ -3023,11 +4384,25 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const reg = parseRegName(reg_name) orelse
return self.fail("unrecognized register: '{s}'", .{reg_name});
- const arg_mcv = try self.resolveInst(arg);
+ const arg_mcv = try self.resolveInst(input);
try self.register_manager.getReg(reg, null);
- try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv);
+ try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
}
+ {
+ var clobber_i: u32 = 0;
+ while (clobber_i < clobbers_len) : (clobber_i += 1) {
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += clobber.len / 4 + 1;
+
+ // TODO honor these
+ }
+ }
+
+ const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+
if (mem.eql(u8, asm_source, "svc #0")) {
_ = try self.addInst(.{
.tag = .svc,
@@ -3050,18 +4425,29 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .none = {} };
}
};
- if (outputs.len + args.len <= Liveness.bpi - 1) {
+
+ simple: {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, outputs);
- std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args);
+ var buf_index: usize = 0;
+ for (outputs) |output| {
+ if (output == .none) continue;
+
+ if (buf_index >= buf.len) break :simple;
+ buf[buf_index] = output;
+ buf_index += 1;
+ }
+ if (buf_index + inputs.len > buf.len) break :simple;
+ std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
return self.finishAir(inst, result, buf);
}
- var bt = try self.iterateBigTomb(inst, outputs.len + args.len);
+ var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
for (outputs) |output| {
+ if (output == .none) continue;
+
bt.feed(output);
}
- for (args) |arg| {
- bt.feed(arg);
+ for (inputs) |input| {
+ bt.feed(input);
}
return bt.finishAir(result);
}
@@ -3071,9 +4457,7 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigT
return BigTomb{
.function = self,
.inst = inst,
- .tomb_bits = self.liveness.getTombBits(inst),
- .big_tomb_bits = self.liveness.special.get(inst) orelse 0,
- .bit_index = 0,
+ .lbt = self.liveness.iterateBigTomb(inst),
};
}
@@ -3091,10 +4475,9 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (mcv) {
.dead => unreachable,
- .ptr_stack_offset => unreachable,
- .ptr_embedded_in_code => unreachable,
.unreach, .none => return, // Nothing to do.
.undef => {
if (!self.wantSafety())
@@ -3107,26 +4490,19 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
else => return self.fail("TODO implement memset", .{}),
}
},
- .compare_flags_unsigned,
- .compare_flags_signed,
+ .cpsr_flags,
.immediate,
+ .ptr_stack_offset,
=> {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
},
- .embedded_in_code => |code_offset| {
- _ = code_offset;
- return self.fail("TODO implement set stack variable from embedded_in_code", .{});
- },
.register => |reg| {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
- const adj_off = stack_offset + abi_size;
-
switch (abi_size) {
1, 4 => {
- const offset = if (math.cast(u12, adj_off)) |imm| blk: {
+ const offset = if (math.cast(u12, stack_offset)) |imm| blk: {
break :blk Instruction.Offset.imm(imm);
- } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0);
+ } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
@@ -3147,9 +4523,9 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
});
},
2 => {
- const offset = if (adj_off <= math.maxInt(u8)) blk: {
- break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off));
- } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }));
+ const offset = if (stack_offset <= math.maxInt(u8)) blk: {
+ break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
+ } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }));
_ = try self.addInst(.{
.tag = .strh,
@@ -3166,76 +4542,95 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}),
}
},
+ .register_c_flag,
+ .register_v_flag,
+ => |reg| {
+ const reg_lock = self.register_manager.lockReg(reg);
+ defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
+
+ const wrapped_ty = ty.structFieldType(0);
+ try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
+
+ const overflow_bit_ty = ty.structFieldType(1);
+ const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+ const cond_reg = try self.register_manager.allocReg(null, gp);
+
+ // C flag: movcs reg, #1
+ // V flag: movvs reg, #1
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .cond = switch (mcv) {
+ .register_c_flag => .cs,
+ .register_v_flag => .vs,
+ else => unreachable,
+ },
+ .data = .{ .rr_op = .{
+ .rd = cond_reg,
+ .rn = .r0,
+ .op = Instruction.Operand.fromU32(1).?,
+ } },
+ });
+
+ try self.genSetStack(overflow_bit_ty, stack_offset - overflow_bit_offset, .{
+ .register = cond_reg,
+ });
+ },
.memory,
.stack_argument_offset,
+ .stack_offset,
=> {
- if (ty.abiSize(self.target.*) <= 4) {
- const reg = try self.copyToTmpRegister(ty, mcv);
- return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
- } else {
- return self.fail("TODO implement memcpy", .{});
+ switch (mcv) {
+ .stack_offset => |off| {
+ if (stack_offset == off)
+ return; // Copy stack variable to itself; nothing to do.
+ },
+ else => {},
}
- },
- .stack_offset => |off| {
- if (stack_offset == off)
- return; // Copy stack variable to itself; nothing to do.
- if (ty.abiSize(self.target.*) <= 4) {
+ if (abi_size <= 4) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
} else {
- // TODO optimize the register allocation
- const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null });
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+
+ // TODO call extern memcpy
+ const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
const src_reg = regs[0];
const dst_reg = regs[1];
const len_reg = regs[2];
const count_reg = regs[3];
const tmp_reg = regs[4];
- // sub src_reg, fp, #off
- const adj_src_offset = off + @intCast(u32, ty.abiSize(self.target.*));
- const src_offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(adj_src_offset)) |x| x else {
- return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
- };
- _ = try self.addInst(.{
- .tag = .sub,
- .data = .{ .rr_op = .{
- .rd = src_reg,
- .rn = .fp,
- .op = src_offset_op,
- } },
- });
+ switch (mcv) {
+ .stack_offset => |off| {
+ // sub src_reg, fp, #off
+ try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
+ },
+ .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .stack_argument_offset => |off| {
+ _ = try self.addInst(.{
+ .tag = .ldr_ptr_stack_argument,
+ .data = .{ .r_stack_offset = .{
+ .rt = src_reg,
+ .stack_offset = off,
+ } },
+ });
+ },
+ else => unreachable,
+ }
// sub dst_reg, fp, #stack_offset
- const adj_dst_offset = stack_offset + @intCast(u32, ty.abiSize(self.target.*));
- const dst_offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(adj_dst_offset)) |x| x else {
- return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
- };
- _ = try self.addInst(.{
- .tag = .sub,
- .data = .{ .rr_op = .{
- .rd = dst_reg,
- .rn = .fp,
- .op = dst_offset_op,
- } },
- });
+ try self.genSetReg(ptr_ty, dst_reg, .{ .ptr_stack_offset = stack_offset });
- // mov len, #elem_size
- const elem_size = @intCast(u32, ty.abiSize(self.target.*));
- const len_op: Instruction.Operand = if (Instruction.Operand.fromU32(elem_size)) |x| x else {
- return self.fail("TODO load: set reg to elem_size with all possible sizes", .{});
- };
- _ = try self.addInst(.{
- .tag = .mov,
- .data = .{ .rr_op = .{
- .rd = len_reg,
- .rn = .r0,
- .op = len_op,
- } },
- });
+ // mov len, #abi_size
+ try self.genSetReg(Type.usize, len_reg, .{ .immediate = abi_size });
// memcpy(src, dst, len)
- try self.genArmInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
}
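
When the value being spilled is wider than 4 bytes, the branch above sets up five scratch registers and hands them to genInlineMemcpy, which emits an inline copy loop (a byte-at-a-time loop, judging by the single tmp register it is given). As a rough sketch of the behaviour those registers implement, in ordinary Zig for readability rather than the emitted Mir:

    // Rough sketch only; names mirror the five registers allocated above
    // (src, dst, len, count, tmp).
    fn inlineMemcpySketch(dst: [*]u8, src: [*]const u8, len: usize) void {
        var count: usize = 0;
        while (count < len) : (count += 1) {
            const tmp = src[count]; // load one byte from the source
            dst[count] = tmp; // store it to the destination
        }
    }
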
@@ -3244,7 +4639,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
switch (mcv) {
.dead => unreachable,
- .ptr_embedded_in_code => unreachable,
.unreach, .none => return, // Nothing to do.
.undef => {
if (!self.wantSafety())
@@ -3252,13 +4646,9 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// Write the debug undefined value.
return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa });
},
- .ptr_stack_offset => |unadjusted_off| {
+ .ptr_stack_offset => |off| {
// TODO: maybe addressing from sp instead of fp
- const elem_ty = ty.childType();
- const abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
- const adj_off = unadjusted_off + abi_size;
-
- const op = Instruction.Operand.fromU32(adj_off) orelse
+ const op = Instruction.Operand.fromU32(off) orelse
return self.fail("TODO larger stack offsets", .{});
_ = try self.addInst(.{
@@ -3270,15 +4660,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
} },
});
},
- .compare_flags_unsigned,
- .compare_flags_signed,
- => |op| {
- const condition = switch (mcv) {
- .compare_flags_unsigned => Condition.fromCompareOperatorUnsigned(op),
- .compare_flags_signed => Condition.fromCompareOperatorSigned(op),
- else => unreachable,
- };
-
+ .cpsr_flags => |condition| {
const zero = Instruction.Operand.imm(0, 0);
const one = Instruction.Operand.imm(1, 0);
@@ -3304,9 +4686,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
},
.immediate => |x| {
- if (x > math.maxInt(u32)) return self.fail("ARM registers are 32-bit wide", .{});
-
- if (Instruction.Operand.fromU32(@intCast(u32, x))) |op| {
+ if (Instruction.Operand.fromU32(x)) |op| {
_ = try self.addInst(.{
.tag = .mov,
.data = .{ .rr_op = .{
@@ -3315,7 +4695,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.op = op,
} },
});
- } else if (Instruction.Operand.fromU32(~@intCast(u32, x))) |op| {
+ } else if (Instruction.Operand.fromU32(~x)) |op| {
_ = try self.addInst(.{
.tag = .mvn,
.data = .{ .rr_op = .{
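
The mov/mvn pair above exists because of how A32 encodes immediates: a data-processing operand must be an 8-bit constant rotated right by an even amount, which is the check Instruction.Operand.fromU32 performs. mvn loads the bitwise complement of its operand, so when x itself is not encodable but ~x is, encoding ~x still yields x in the register. A hypothetical helper expressing that fit check (not the compiler's own code):

    const std = @import("std");

    // Hypothetical helper: true if `x` is encodable as an A32 data-processing
    // immediate, i.e. an 8-bit value rotated right by an even amount (0..30).
    fn fitsArmImmediate(x: u32) bool {
        var rot: u6 = 0;
        while (rot < 32) : (rot += 2) {
            // Undo a rotate-right by `rot`; if the result fits in 8 bits, x fits.
            if (std.math.rotl(u32, x, rot) <= 0xff) return true;
        }
        return false;
    }

mov covers the case fitsArmImmediate(x), mvn the case fitsArmImmediate(~x); values that satisfy neither have to be built up some other way (movw/movt pairs or a literal load are the usual options).
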
@@ -3428,76 +4808,73 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
} },
});
},
+ .register_c_flag => unreachable, // doesn't fit into a register
+ .register_v_flag => unreachable, // doesn't fit into a register
.memory => |addr| {
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(ty, reg, .{ .immediate = @intCast(u32, addr) });
- _ = try self.addInst(.{
- .tag = .ldr,
- .data = .{ .rr_offset = .{
- .rt = reg,
- .rn = reg,
- .offset = .{ .offset = Instruction.Offset.none },
- } },
- });
+ try self.genLdrRegister(reg, reg, ty);
},
- .stack_offset => |unadjusted_off| {
+ .stack_offset => |off| {
// TODO: maybe addressing from sp instead of fp
const abi_size = @intCast(u32, ty.abiSize(self.target.*));
- const adj_off = unadjusted_off + abi_size;
- switch (abi_size) {
- 1, 4 => {
- const offset = if (adj_off <= math.maxInt(u12)) blk: {
- break :blk Instruction.Offset.imm(@intCast(u12, adj_off));
- } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0);
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb,
+ 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh,
+ 3, 4 => .ldr,
+ else => unreachable,
+ };
- const tag: Mir.Inst.Tag = switch (abi_size) {
- 1 => .ldrb,
- 4 => .ldr,
- else => unreachable,
- };
+ const extra_offset = switch (abi_size) {
+ 1 => ty.isSignedInt(),
+ 2 => true,
+ 3, 4 => false,
+ else => unreachable,
+ };
- _ = try self.addInst(.{
- .tag = tag,
- .data = .{ .rr_offset = .{
- .rt = reg,
- .rn = .fp,
- .offset = .{
- .offset = offset,
- .positive = false,
- },
- } },
- });
- },
- 2 => {
- const offset = if (adj_off <= math.maxInt(u8)) blk: {
- break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off));
- } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }));
+ if (extra_offset) {
+ const offset = if (off <= math.maxInt(u8)) blk: {
+ break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off));
+ } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off }));
- _ = try self.addInst(.{
- .tag = .ldrh,
- .data = .{ .rr_extra_offset = .{
- .rt = reg,
- .rn = .fp,
- .offset = .{
- .offset = offset,
- .positive = false,
- },
- } },
- });
- },
- else => return self.fail("TODO a type of size {} is not allowed in a register", .{abi_size}),
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .rr_extra_offset = .{
+ .rt = reg,
+ .rn = .fp,
+ .offset = .{
+ .offset = offset,
+ .positive = false,
+ },
+ } },
+ });
+ } else {
+ const offset = if (off <= math.maxInt(u12)) blk: {
+ break :blk Instruction.Offset.imm(@intCast(u12, off));
+ } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off }), .none);
+
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .rr_offset = .{
+ .rt = reg,
+ .rn = .fp,
+ .offset = .{
+ .offset = offset,
+ .positive = false,
+ },
+ } },
+ });
}
},
- .stack_argument_offset => |unadjusted_off| {
+ .stack_argument_offset => |off| {
const abi_size = ty.abiSize(self.target.*);
- const adj_off = unadjusted_off + abi_size;
const tag: Mir.Inst.Tag = switch (abi_size) {
- 1 => .ldrb_stack_argument,
- 2 => .ldrh_stack_argument,
- 4 => .ldr_stack_argument,
+ 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+ 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+ 3, 4 => .ldr_stack_argument,
else => unreachable,
};
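
The signed/unsigned split in the tag switch above (and in the fp-relative load earlier in this function) matters because sub-word loads must fill the upper register bits: ldrb/ldrh zero-extend, while ldrsb/ldrsh sign-extend. A small worked example of the difference for a negative byte (illustrative values only):

    // An i8 holding -1 is stored as the byte 0xff.
    const byte: u8 = 0xff;
    const zero_extended: u32 = byte; // 0x000000ff, what ldrb leaves in a register
    const sign_extended: i32 = @bitCast(i8, byte); // -1 (0xffffffff), what ldrsb yields
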
@@ -3505,15 +4882,15 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = tag,
.data = .{ .r_stack_offset = .{
.rt = reg,
- .stack_offset = @intCast(u32, adj_off),
+ .stack_offset = off,
} },
});
},
- else => return self.fail("TODO implement getSetReg for arm {}", .{mcv}),
}
}
fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (mcv) {
.dead => unreachable,
.none, .unreach => return,
@@ -3521,7 +4898,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
- switch (ty.abiSize(self.target.*)) {
+ switch (abi_size) {
1 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaa }),
2 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaa }),
4 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -3529,14 +4906,11 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
}
},
.register => |reg| {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
- const adj_off = stack_offset - abi_size;
-
switch (abi_size) {
1, 4 => {
- const offset = if (math.cast(u12, adj_off)) |imm| blk: {
+ const offset = if (math.cast(u12, stack_offset)) |imm| blk: {
break :blk Instruction.Offset.imm(imm);
- } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0);
+ } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
@@ -3554,9 +4928,9 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
});
},
2 => {
- const offset = if (adj_off <= math.maxInt(u8)) blk: {
- break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off));
- } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }));
+ const offset = if (stack_offset <= math.maxInt(u8)) blk: {
+ break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
+ } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }));
_ = try self.addInst(.{
.tag = .strh,
@@ -3570,28 +4944,78 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}),
}
},
- .immediate,
- .compare_flags_signed,
- .compare_flags_unsigned,
+ .register_c_flag,
+ .register_v_flag,
+ => {
+ return self.fail("TODO implement genSetStack {}", .{mcv});
+ },
.stack_offset,
.memory,
.stack_argument_offset,
- .embedded_in_code,
=> {
- if (ty.abiSize(self.target.*) <= 4) {
+ if (abi_size <= 4) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
} else {
- return self.fail("TODO implement memcpy", .{});
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+
+ // TODO call extern memcpy
+ const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
+ const src_reg = regs[0];
+ const dst_reg = regs[1];
+ const len_reg = regs[2];
+ const count_reg = regs[3];
+ const tmp_reg = regs[4];
+
+ switch (mcv) {
+ .stack_offset => |off| {
+ // sub src_reg, fp, #off
+ try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
+ },
+ .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .stack_argument_offset => |off| {
+ _ = try self.addInst(.{
+ .tag = .ldr_ptr_stack_argument,
+ .data = .{ .r_stack_offset = .{
+ .rt = src_reg,
+ .stack_offset = off,
+ } },
+ });
+ },
+ else => unreachable,
+ }
+
+ // add dst_reg, sp, #stack_offset
+ const dst_offset_op: Instruction.Operand = if (Instruction.Operand.fromU32(stack_offset)) |x| x else {
+ return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
+ };
+ _ = try self.addInst(.{
+ .tag = .add,
+ .data = .{ .rr_op = .{
+ .rd = dst_reg,
+ .rn = .sp,
+ .op = dst_offset_op,
+ } },
+ });
+
+ // mov len, #abi_size
+ try self.genSetReg(Type.usize, len_reg, .{ .immediate = abi_size });
+
+ // memcpy(src, dst, len)
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
- .ptr_stack_offset => {
+ .cpsr_flags,
+ .immediate,
+ .ptr_stack_offset,
+ => {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
},
- .ptr_embedded_in_code => {
- return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
- },
}
}
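
Note the addressing asymmetry between the two setters: genSetStack reaches locals downward from the frame pointer (its .ptr_stack_offset path subtracts the offset from fp), whereas genSetStackArgument writes outgoing call arguments upward from the stack pointer (the add dst_reg, sp, #stack_offset above). A toy illustration of the two effective-address computations, assuming that frame layout (hypothetical helpers):

    // Hypothetical helpers; assumes locals sit below fp and outgoing
    // arguments sit just above sp in the current frame.
    fn localAddress(fp: u32, stack_offset: u32) u32 {
        return fp - stack_offset; // genSetStack / .ptr_stack_offset
    }

    fn outgoingArgAddress(sp: u32, stack_offset: u32) u32 {
        return sp + stack_offset; // genSetStackArgument
    }
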
@@ -3609,9 +5033,17 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airArrayToSlice for {}", .{
- self.target.cpu.arch,
- });
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const ptr_ty = self.air.typeOf(ty_op.operand);
+ const ptr = try self.resolveInst(ty_op.operand);
+ const array_ty = ptr_ty.childType();
+ const array_len = @intCast(u32, array_ty.arrayLen());
+
+ const stack_offset = try self.allocMem(inst, 8, 8);
+ try self.genSetStack(ptr_ty, stack_offset, ptr);
+ try self.genSetStack(Type.initTag(.usize), stack_offset - 4, .{ .immediate = array_len });
+ break :result MCValue{ .stack_offset = stack_offset };
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
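
airArrayToSlice above materializes the slice as an 8-byte stack value: the array pointer in one word and the length in the other. At the source level the operation is just coercing a pointer-to-array into a slice; a minimal illustration (hypothetical function with a fixed array length):

    // Hypothetical illustration: a *[N]T coerces to []T, producing the
    // { ptr, len } pair that the lowering above stores on the stack.
    fn arrayToSliceSketch(array_ptr: *[16]u8) []u8 {
        return array_ptr; // ptr = array_ptr, len = 16
    }
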
@@ -3693,14 +5125,33 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airVectorInit(self: *Self, inst: Air.Inst.Index) !void {
+fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSelect for arm", .{});
+ return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
+}
+
+fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for arm", .{});
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
+fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
+ const reduce = self.air.instructions.items(.data)[inst].reduce;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airReduce for arm", .{});
+ return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
+}
+
+fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.air.typeOfIndex(inst);
const len = vector_ty.vectorLen();
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+ const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
- return self.fail("TODO implement airVectorInit for arm", .{});
+ return self.fail("TODO implement airAggregateInit for arm", .{});
};
if (elements.len <= Liveness.bpi - 1) {
@@ -3715,17 +5166,61 @@ fn airVectorInit(self: *Self, inst: Air.Inst.Index) !void {
return bt.finishAir(result);
}
+fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
+ _ = extra;
+
+ return self.fail("TODO implement airUnionInit for arm", .{});
+}
+
fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
const prefetch = self.air.instructions.items(.data)[inst].prefetch;
return self.finishAir(inst, MCValue.dead, .{ prefetch.ptr, .none, .none });
}
+fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
+ return self.fail("TODO implement airMulAdd for arm", .{});
+ };
+ return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
+}
+
+fn airTry(self: *Self, inst: Air.Inst.Index) !void {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.Try, pl_op.payload);
+ const body = self.air.extra[extra.end..][0..extra.data.body_len];
+ const result: MCValue = result: {
+ const error_union_ty = self.air.typeOf(pl_op.operand);
+ const error_union = try self.resolveInst(pl_op.operand);
+ const is_err_result = try self.isErr(error_union_ty, error_union);
+ const reloc = try self.condBr(is_err_result);
+
+ try self.genBody(body);
+
+ try self.performReloc(reloc);
+ break :result try self.errUnionPayload(error_union, error_union_ty);
+ };
+ return self.finishAir(inst, result, .{ pl_op.operand, .none, .none });
+}
+
+fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
+ const body = self.air.extra[extra.end..][0..extra.data.body_len];
+ _ = body;
+ return self.fail("TODO implement airTryPtr for arm", .{});
+ // return self.finishAir(inst, result, .{ extra.data.ptr, .none, .none });
+}
+
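
airTry above follows the source semantics of try: test the error union with isErr, branch over the error-handling body when there is no error, and continue with the payload. The equivalent source-level shape, for orientation (illustrative function only):

    // Illustrative only: isErr feeds condBr, the body is the error path,
    // and errUnionPayload produces the value used afterwards.
    fn trySketch(eu: anyerror!u32) anyerror!u32 {
        const payload = eu catch |err| return err; // same as `try eu`
        return payload;
    }
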
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
- if (!tv.ty.hasRuntimeBits()) {
+ if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
@@ -3733,7 +5228,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
- if (!inst_ty.hasRuntimeBits())
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
@@ -3768,24 +5263,25 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
+fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- decl.alive = true;
+ const mod = self.bin_file.options.module.?;
+ const decl = mod.declPtr(decl_index);
+ mod.markDeclAlive(decl);
+
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
- // TODO I'm hacking my way through here by repurposing .memory for storing
- // index to the GOT target symbol index.
- return MCValue{ .memory = decl.link.macho.local_sym_index };
+ unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl);
+ try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
@@ -3795,7 +5291,26 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
_ = tv;
}
+fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
+ const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
+ return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
+ };
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
+ return MCValue{ .memory = vaddr };
+ } else if (self.bin_file.cast(link.File.MachO)) |_| {
+ unreachable;
+ } else if (self.bin_file.cast(link.File.Coff)) |_| {
+ return self.fail("TODO lower unnamed const in COFF", .{});
+ } else if (self.bin_file.cast(link.File.Plan9)) |_| {
+ return self.fail("TODO lower unnamed const in Plan9", .{});
+ } else {
+ return self.fail("TODO lower unnamed const", .{});
+ }
+}
+
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
+ log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
@@ -3804,43 +5319,41 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl);
+ return self.lowerDeclRef(typed_value, payload.data.decl_index);
}
+ const target = self.target.*;
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
- const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
- const slice_len = typed_value.val.sliceLen();
- // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
- // the Sema code needs to use anonymous Decls or alloca instructions to store data.
- const ptr_imm = ptr_mcv.memory;
- _ = slice_len;
- _ = ptr_imm;
- // We need more general support for const data being stored in memory to make this work.
- return self.fail("TODO codegen for const slices", .{});
- },
+ .Slice => {},
else => {
- if (typed_value.val.tag() == .int_u64) {
- return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
+ switch (typed_value.val.tag()) {
+ .int_u64 => {
+ return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt(target)) };
+ },
+ else => {},
}
- return self.fail("TODO codegen more kinds of const pointers", .{});
},
},
.Int => {
const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits > ptr_bits or info.signedness == .signed) {
- return self.fail("TODO const int bigger than ptr and signed int", .{});
+ if (info.bits <= ptr_bits) {
+ const unsigned = switch (info.signedness) {
+ .signed => blk: {
+ const signed = @intCast(i32, typed_value.val.toSignedInt());
+ break :blk @bitCast(u32, signed);
+ },
+ .unsigned => @intCast(u32, typed_value.val.toUnsignedInt(target)),
+ };
+
+ return MCValue{ .immediate = unsigned };
+ } else {
+ return self.lowerUnnamedConst(typed_value);
}
- return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
},
.Bool => {
return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
},
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
.Optional => {
if (typed_value.ty.isPtrLikeOptional()) {
if (typed_value.val.isNull())
@@ -3854,7 +5367,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
} else if (typed_value.ty.abiSize(self.target.*) == 1) {
return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
}
- return self.fail("TODO non pointer optionals", .{});
},
.Enum => {
if (typed_value.val.castTag(.enum_field_index)) |field_index| {
@@ -3880,35 +5392,47 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
}
},
.ErrorSet => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
+ switch (typed_value.val.tag()) {
+ .@"error" => {
+ const err_name = typed_value.val.castTag(.@"error").?.data.name;
+ const module = self.bin_file.options.module.?;
+ const global_error_set = module.global_error_set;
+ const error_index = global_error_set.get(err_name).?;
+ return MCValue{ .immediate = error_index };
+ },
+ else => {
+ // In this case we are rendering an error union which has a 0 bits payload.
+ return MCValue{ .immediate = 0 };
+ },
+ }
},
.ErrorUnion => {
const error_type = typed_value.ty.errorUnionSet();
const payload_type = typed_value.ty.errorUnionPayload();
+ const is_pl = typed_value.val.errorUnionIsPayload();
- if (typed_value.val.castTag(.eu_payload)) |pl| {
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return MCValue{ .immediate = 0 };
- }
-
- _ = pl;
- return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty});
- } else {
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val });
- }
-
- return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty});
+ if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+ // We use the error type directly as the type.
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
+ return self.genTypedValue(.{ .ty = error_type, .val = err_val });
}
},
- else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}),
+
+ .ComptimeInt => unreachable, // semantic analysis prevents this
+ .ComptimeFloat => unreachable, // semantic analysis prevents this
+ .Type => unreachable,
+ .EnumLiteral => unreachable,
+ .Void => unreachable,
+ .NoReturn => unreachable,
+ .Undefined => unreachable,
+ .Null => unreachable,
+ .BoundFn => unreachable,
+ .Opaque => unreachable,
+
+ else => {},
}
+
+ return self.lowerUnnamedConst(typed_value);
}
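
In the Int branch of genTypedValue above, signed constants that fit in a pointer-sized integer are re-interpreted as their 32-bit two's-complement pattern before being used as an immediate, and anything wider falls through to lowerUnnamedConst. A tiny worked example of the bit-cast step (illustrative values):

    // A signed constant such as -1 becomes the immediate 0xffffffff.
    const signed_const: i32 = -1;
    const imm: u32 = @bitCast(u32, signed_const);
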
const CallMCValues = struct {
@@ -3953,6 +5477,25 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var ncrn: usize = 0; // Next Core Register Number
var nsaa: u32 = 0; // Next stacked argument address
+ if (ret_ty.zigTypeTag() == .NoReturn) {
+ result.return_value = .{ .unreach = {} };
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ result.return_value = .{ .none = {} };
+ } else {
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ // TODO handle cases where multiple registers are used
+ if (ret_ty_size <= 4) {
+ result.return_value = .{ .register = c_abi_int_return_regs[0] };
+ } else {
+ // The result is returned by reference, not by
+ // value. This means that r0 will contain the
+ // address to which this function should write
+ // the result.
+ result.return_value = .{ .stack_offset = 0 };
+ ncrn = 1;
+ }
+ }
+
for (param_types) |ty, i| {
if (ty.abiAlignment(self.target.*) == 8)
ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2);
@@ -3972,8 +5515,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ty.abiAlignment(self.target.*) == 8)
nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8);
- result.args[i] = .{ .stack_argument_offset = nsaa };
nsaa += param_size;
+ result.args[i] = .{ .stack_argument_offset = nsaa };
}
}
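
One detail of the stacked-argument path above: the recorded .stack_argument_offset is taken after nsaa has been advanced by the parameter's size, so it points at the end of the argument's slot rather than its start. A compact sketch of that bookkeeping (hypothetical helper mirroring the loop above):

    const std = @import("std");

    // Hypothetical helper: advance the "next stacked argument address" for one
    // parameter and return the offset that is stored for it.
    fn stackArgOffsetSketch(nsaa: *u32, param_size: u32, param_align: u32) u32 {
        if (param_align == 8) nsaa.* = std.mem.alignForwardGeneric(u32, nsaa.*, 8);
        nsaa.* += param_size;
        return nsaa.*; // recorded as .stack_argument_offset
    }
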
@@ -3981,12 +5524,37 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
result.stack_align = 8;
},
.Unspecified => {
+ if (ret_ty.zigTypeTag() == .NoReturn) {
+ result.return_value = .{ .unreach = {} };
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+ result.return_value = .{ .none = {} };
+ } else {
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ if (ret_ty_size == 0) {
+ assert(ret_ty.isError());
+ result.return_value = .{ .immediate = 0 };
+ } else if (ret_ty_size <= 4) {
+ result.return_value = .{ .register = .r0 };
+ } else {
+ // The result is returned by reference, not by
+ // value. This means that r0 will contain the
+ // address to which this function should write
+ // the result.
+ result.return_value = .{ .stack_offset = 0 };
+ }
+ }
+
var stack_offset: u32 = 0;
for (param_types) |ty, i| {
- stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, ty.abiAlignment(self.target.*));
- result.args[i] = .{ .stack_argument_offset = stack_offset };
- stack_offset += @intCast(u32, ty.abiSize(self.target.*));
+ if (ty.abiSize(self.target.*) > 0) {
+ const param_size = @intCast(u32, ty.abiSize(self.target.*));
+
+ stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, ty.abiAlignment(self.target.*)) + param_size;
+ result.args[i] = .{ .stack_argument_offset = stack_offset };
+ } else {
+ result.args[i] = .{ .none = {} };
+ }
}
result.stack_byte_count = stack_offset;
@@ -3995,22 +5563,6 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => return self.fail("TODO implement function parameters for {} on arm", .{cc}),
}
- if (ret_ty.zigTypeTag() == .NoReturn) {
- result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBits()) {
- result.return_value = .{ .none = {} };
- } else switch (cc) {
- .Naked => unreachable,
- .Unspecified, .C => {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
- if (ret_ty_size <= 4) {
- result.return_value = .{ .register = c_abi_int_return_regs[0] };
- } else {
- return self.fail("TODO support more return types for ARM backend", .{});
- }
- },
- else => return self.fail("TODO implement function return values for {}", .{cc}),
- }
return result;
}
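
Both conventions above now classify return values the same way: no runtime bits means nothing to return (a zero-sized error type collapses to the immediate 0 in the Unspecified case), results of up to 4 bytes come back in r0, and larger results are returned indirectly through a pointer the caller passes in r0 (which is also why ncrn starts at 1 in the C case). A compact sketch of that classification (illustrative only):

    // Illustrative classification of a return value by its ABI size.
    fn classifyReturn(abi_size: u32) enum { none, in_r0, indirect_via_r0 } {
        if (abi_size == 0) return .none;
        if (abi_size <= 4) return .in_r0;
        return .indirect_via_r0;
    }
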
@@ -4038,13 +5590,6 @@ fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerErro
return error.CodegenFail;
}
-const Register = @import("bits.zig").Register;
-const Instruction = @import("bits.zig").Instruction;
-const Condition = @import("bits.zig").Condition;
-const callee_preserved_regs = @import("bits.zig").callee_preserved_regs;
-const c_abi_int_param_regs = @import("bits.zig").c_abi_int_param_regs;
-const c_abi_int_return_regs = @import("bits.zig").c_abi_int_return_regs;
-
fn parseRegName(name: []const u8) ?Register {
if (@hasDecl(Register, "parseRegName")) {
return Register.parseRegName(name);